blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
281
content_id
stringlengths
40
40
detected_licenses
listlengths
0
57
license_type
stringclasses
2 values
repo_name
stringlengths
6
116
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
313 values
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
18.2k
668M
star_events_count
int64
0
102k
fork_events_count
int64
0
38.2k
gha_license_id
stringclasses
17 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
107 values
src_encoding
stringclasses
20 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.02M
extension
stringclasses
78 values
content
stringlengths
2
6.02M
authors
listlengths
1
1
author
stringlengths
0
175
c4ef0a5ad842febae7dc7d0f6b86210f665d8c52
71b7b6d84a61f514b038fac7741e6d16973fcaa9
/devel/lib/python2.7/dist-packages/object_manipulation_msgs/msg/_GraspHandPostureExecutionFeedback.py
239a1161abae63cd1fec56383bbec0f1c7153957
[]
no_license
YiKangJ/perception_driven_ws
15c02e523f1a708fe63b216d73019c8c2bde97a1
0a0f8fcbe3f5fed26439f449999b85f1e38c0f70
refs/heads/master
2020-04-01T19:47:48.372111
2018-10-18T06:17:57
2018-10-18T06:17:57
153,571,793
0
0
null
null
null
null
UTF-8
Python
false
false
165
py
/home/jyk/perception_driven_ws/devel/.private/object_manipulation_msgs/lib/python2.7/dist-packages/object_manipulation_msgs/msg/_GraspHandPostureExecutionFeedback.py
[ "jinyikangjyk@163.com" ]
jinyikangjyk@163.com
38ad82c1a122a32358a5cad4b79af8911d55b7e9
fae68b144a7020d9731896cab5d62a97ce25ae91
/image_augment.py
f40bdbedcb9a6163c36e3ee0a6dea31f1c328f5f
[]
no_license
brandon-castaing-ucb/SecuriBot
a3dc41591fe66dd51e99374cd9bc1661f49487e5
fcd70e6f9c35fef9332b6c9a1adfa9002fcf9fa4
refs/heads/master
2023-07-24T23:21:42.365678
2020-01-21T03:38:58
2020-01-21T03:38:58
220,586,158
0
0
null
2023-07-06T21:47:10
2019-11-09T03:30:21
Python
UTF-8
Python
false
false
3,726
py
import sys, os, re, traceback from os.path import isfile from multiprocessing.dummy import Pool from augmentations.fliph import FlipH from augmentations.zoom import Zoom from augmentations.blur import Blur from augmentations.noise import Noise from augmentations.translate import Translate from skimage.io import imread, imsave class ImageAugment: EXTENSIONS = ['png', 'jpg', 'jpeg', 'bmp'] WORKER_COUNT = max(4, 1) OPERATIONS = [FlipH, Translate, Noise, Zoom, Blur] ''' ## Leveraging image augmnetation code from HW7 ## Augmented files will have names matching the regex below, eg original__rot90__crop1__flipv.jpg ''' AUGMENTED_FILE_REGEX = re.compile('^.*(__.+)+\\.[^\\.]+$') EXTENSION_REGEX = re.compile('|'.join(map(lambda n : '.*\\.' + n + '$', EXTENSIONS))) thread_pool = None count = 0 @staticmethod def build_augmented_file_name(original_name, ops): root, ext = os.path.splitext(original_name) result = root for op in ops: result += '__' + op.code return result + ext @staticmethod def work(d, f, op_lists): try: in_path = os.path.join(d,f) for op_list in op_lists: out_file_name = ImageAugment.build_augmented_file_name(f, op_list) if isfile(os.path.join(d,out_file_name)): continue img = imread(in_path) for op in op_list: img = op.process(img) imsave(os.path.join(d, out_file_name), img) ImageAugment.count += 1 except: traceback.print_exc(file=sys.stdout) @staticmethod def process(dir, file, op_lists): ImageAugment.thread_pool.apply_async(ImageAugment.work, (dir, file, op_lists)) @staticmethod def execute(image_dir, op_codes): print('Starting image processing...') if not os.path.isdir(image_dir): print('Invalid image directory: {}'.format(image_dir)) return None op_lists = [] for op_code_list in op_codes: op_list = [] for op_code in op_code_list.split(','): op = None for op in ImageAugment.OPERATIONS: op = op.match_code(op_code) if op: op_list.append(op) break if not op: print('Unknown operation {}'.format(op_code)) return None op_lists.append(op_list) 
ImageAugment.thread_pool = Pool(ImageAugment.WORKER_COUNT) print('Thread pool initialised with {} worker{}'.format(ImageAugment.WORKER_COUNT, '' if ImageAugment.WORKER_COUNT == 1 else 's')) matches = [] for dir_info in os.walk(image_dir): dir_name, _, file_names = dir_info print('Processing {image_dir}/{}...'.format(dir_name)) for file_name in file_names: if ImageAugment.EXTENSION_REGEX.match(file_name): if ImageAugment.AUGMENTED_FILE_REGEX.match(file_name): print("Skipped Augmentation") else: ImageAugment.process(dir_name, file_name, op_lists) else: print("Skipped") print("Waiting for workers to complete...") ImageAugment.thread_pool.close() ImageAugment.thread_pool.join() print(f"Processed Images: {ImageAugment.count}") if __name__ == '__main__': ImageAugment().execute("../data", ["fliph","noise_0.01","noise_0.03","noise_0.05","trans_10_10","trans_20_20","blur_1.0","blur_2.0"])
[ "bcastaing@berkeley.edu" ]
bcastaing@berkeley.edu
8fd5e717b4d06d2f26535413e07fae832635769d
72e463c26daf79b7d380db59a58849e3cd095a7e
/week7/day1_api.py
f7bcb6d95489339333501141914115cb6d9975ba
[]
no_license
tdhuynh/tiy_class_notes
dcc5454af63ca888cfdb99e85f4370cabce88f88
a254d77f52cc438476d80ff58bfa9759de7826fa
refs/heads/master
2020-04-15T12:19:30.045552
2016-11-09T14:30:06
2016-11-09T14:30:06
68,213,599
0
0
null
null
null
null
UTF-8
Python
false
false
973
py
import requests # result = requests.get("http://swapi.co/api/people/") # # print(result.text) # json_result = result.json() # # print(json_result) # # print(json_result["name"]) # for person in json_result["results"]: # print(person["name"]) # result = requests.get(json_result["next"]) # json_result = result.json() # # for person in json_result["results"]: # print(person["name"]) ################### def get_data(endpoint, lookup="name"): url = "http://swapi.co/api/{}/".format(endpoint) while url: result = requests.get(url) json_result = result.json() for person in json_result["results"]: print(person[lookup]) if input("Press Enter to keep going, type 'n' to stop " ): break url = json_result["next"] while True: value = input("What do you want to search for? (films) or (people)? ") if value == "films": get_data(value, lookup="title") get_data(value)
[ "tommyhuynh93@gmail.com" ]
tommyhuynh93@gmail.com
c76e7b57a50d1595e23179c5dde7838452d683e2
95789a6503101b98548570f48e80ae12b964fff1
/rango/views.py
5fb1ab348828e249de3637bdc7da82f5648f0859
[]
no_license
NikolayBorovenskiy/How-to-Tango-with-Django
be7a2d0b8354f17e1ec98a7bc5714ea00b386f7a
f04c9e534c84de2b8885dbaaa5144f4d748c33a0
refs/heads/master
2020-05-14T15:49:32.317274
2015-08-09T06:46:07
2015-08-09T06:46:07
39,078,930
0
0
null
null
null
null
UTF-8
Python
false
false
12,340
py
from django.template import RequestContext from django.shortcuts import render_to_response, render, redirect from django.contrib.auth import authenticate, login, logout from django.http import HttpResponseRedirect, HttpResponse from django.contrib.auth.decorators import login_required from rango.models import Category, Page from rango.forms import CategoryForm, PageForm, UserForm, UserProfileForm from datetime import datetime from rango.bing_search import run_query def index(request): #test cookies request.session.set_test_cookie() # Request the context of the request. # The context contains information such as the client's machine details, for example. context = RequestContext(request) # Construct a dictionary to pass to the template engine as its context. # Note the key boldmessage is the same as {{ boldmessage }} in the template! category_list = Category.objects.order_by('-views')[:5] context_dict = {'categories': category_list} for category in category_list: category.url = category.name.replace(' ', '_') # Get the number of visits to the site. # We use the COOKIES.get() function to obtain the visits cookie. # If the cookie exists, the value returned is casted to an integer. # If the cookie doesn't exist, we default to zero and cast that. visits = int(request.COOKIES.get('visits', '1')) reset_last_visit_time = False response = render(request, 'rango/index.html', context_dict) # Does the cookie last_visit exist? if 'last_visit' in request.COOKIES: # Yes it does! Get the cookie's value. last_visit = request.COOKIES['last_visit'] # Cast the value to a Python date/time object. last_visit_time = datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S") # If it's been more than a day since the last visit... 
if (datetime.now() - last_visit_time).seconds > 10: visits = visits + 1 context_dict['visits'] = visits response = render(request, 'rango/index.html', context_dict) # ...and flag that the cookie last visit needs to be updated reset_last_visit_time = True else: # Cookie last_visit doesn't exist, so flag that it should be set. reset_last_visit_time = True context_dict['visits'] = visits print visits #Obtain our Response object early so we can add cookie information. response = render(request, 'rango/index.html', context_dict) if reset_last_visit_time: response.set_cookie('last_visit', datetime.now()) response.set_cookie('visits', visits) # Return response back to the user, updating any cookies that need changed. return response #context = RequestContext(request) #category_name = category_name_slug.replace('_', ' ') #context_dict = {'category_name': category_name} def category(request, category_name_slug): context_dict = {} context_dict['result_list'] = None context_dict['query'] = None context = RequestContext(request) category_name = category_name_slug.replace('_', ' ') context_dict['category_name'] = category_name if request.method == 'POST': query = request.POST['query'].strip() if query: # Run our Bing function to get the results list! result_list = run_query(query) context_dict['result_list'] = result_list context_dict['query'] = query try: category = Category.objects.get(name=category_name) context_dict['category_name'] = category.name pages = Page.objects.filter(category=category).order_by('-views') context_dict['pages'] = pages context_dict['category'] = category except Category.DoesNotExist: pass if not context_dict['query']: context_dict['query'] = category.name return render(request, 'rango/category.html', context_dict) def add_category(request): # A HTTP POST? if request.method == 'POST': form = CategoryForm(request.POST) # Have we been provided with a valid form? if form.is_valid(): # Save the new category to the database. 
form.save(commit=True) # Now call the index() view. # The user will be shown the homepage. return index(request) else: # The supplied form contained errors - just print them to the terminal. print form.errors else: # If the request was not a POST, display the form to enter details. form = CategoryForm() # Bad form (or form details), no form supplied... # Render the form with error messages (if any). return render(request, 'rango/add_category.html', {'form': form}) def add_page(request, category_name_slug): try: cat = Category.objects.get(name=category_name_slug) except Category.DoesNotExist: cat = None if request.method == 'POST': form = PageForm(request.POST) if form.is_valid(): if cat: page = form.save(commit=False) page.category = cat page.views = 0 page.save() # probably better to use a redirect here. return category(request, category_name_slug) else: print form.errors else: form = PageForm() context_dict = {'form':form, 'category': cat} return render(request, 'rango/add_page.html', context_dict) def register(request): if request.session.test_cookie_worked(): print ">>>> TEST COOKIE WORKED!" request.session.delete_test_cookie() # A boolean value for telling the template whether the registration was successful. # Set to False initially. Code changes value to True when registration succeeds. registered = False # If it's a HTTP POST, we're interested in processing form data. if request.method == 'POST': # Attempt to grab information from the raw form information. # Note that we make use of both UserForm and UserProfileForm. user_form = UserForm(data=request.POST) profile_form = UserProfileForm(data=request.POST) # If the two forms are valid... if user_form.is_valid() and profile_form.is_valid(): # Save the user's form data to the database. user = user_form.save() # Now we hash the password with the set_password method. # Once hashed, we can update the user object. user.set_password(user.password) user.save() # Now sort out the UserProfile instance. 
# Since we need to set the user attribute ourselves, we set commit=False. # This delays saving the model until we're ready to avoid integrity problems. profile = profile_form.save(commit=False) profile.user = user # Did the user provide a profile picture? # If so, we need to get it from the input form and put it in the UserProfile model. if 'picture' in request.FILES: profile.picture = request.FILES['picture'] # Now we save the UserProfile model instance. profile.save() # Update our variable to tell the template registration was successful. registered = True # Invalid form or forms - mistakes or something else? # Print problems to the terminal. # They'll also be shown to the user. else: print user_form.errors, profile_form.errors # Not a HTTP POST, so we render our form using two ModelForm instances. # These forms will be blank, ready for user input. else: user_form = UserForm() profile_form = UserProfileForm() # Render the template depending on the context. return render(request, 'rango/register.html', {'user_form': user_form, 'profile_form': profile_form, 'registered': registered} ) def user_login(request): # If the request is a HTTP POST, try to pull out the relevant information. if request.method == 'POST': # Gather the username and password provided by the user. # This information is obtained from the login form. # We use request.POST.get('<variable>') as opposed to request.POST['<variable>'], # because the request.POST.get('<variable>') returns None, if the value does not exist, # while the request.POST['<variable>'] will raise key error exception username = request.POST.get('username') password = request.POST.get('password') # Use Django's machinery to attempt to see if the username/password # combination is valid - a User object is returned if it is. user = authenticate(username=username, password=password) # If we have a User object, the details are correct. 
# If None (Python's way of representing the absence of a value), no user # with matching credentials was found. if user: # Is the account active? It could have been disabled. if user.is_active: # If the account is valid and active, we can log the user in. # We'll send the user back to the homepage. login(request, user) return HttpResponseRedirect('/rango/') else: # An inactive account was used - no logging in! return HttpResponse("Your Rango account is disabled.") else: # Bad login details were provided. So we can't log the user in. print "Invalid login details: {0}, {1}".format(username, password) return HttpResponse("Invalid login details supplied.") # The request is not a HTTP POST, so display the login form. # This scenario would most likely be a HTTP GET. else: # No context variables to pass to the template system, hence the # blank dictionary object... return render(request, 'rango/login.html', {}) @login_required def restricted(request): return HttpResponse("Since you're logged in, you can see this text!") # Use the login_required() decorator to ensure only those logged in can access the view. @login_required def user_logout(request): # Since we know the user is logged in, we can now just log them out. logout(request) # Take the user back to the homepage. return HttpResponseRedirect('/rango/') def search(request): result_list = [] if request.method == 'POST': query = request.POST['query'].strip() if query: # Run our Bing function to get the results list! 
result_list = run_query(query) return render(request, 'rango/search.html', {'result_list': result_list}) def track_url(request): page_id = None url = '/rango/' if request.method == 'GET': if 'page_id' in request.GET: page_id = request.GET['page_id'] try: page = Page.objects.get(id=page_id) page.views = page.views + 1 page.save() url = page.url except: pass return redirect(url) @login_required def like_category(request): cat_id = None if request.method == 'GET': cat_id = request.GET['category_id'] likes = 0 if cat_id: cat = Category.objects.get(id=int(cat_id)) if cat: likes = cat.likes + 1 cat.likes = likes cat.save() return HttpResponse(likes) def get_category_list(max_results=0, starts_with=''): cat_list = [] if starts_with: cat_list = Category.objects.filter(name__istartswith=starts_with) if max_results > 0: if len(cat_list) > max_results: cat_list = cat_list[:max_results] return cat_list def suggest_category(request): cat_list = [] starts_with = '' if request.method == 'GET': starts_with = request.GET['suggestion'] cat_list = get_category_list(8, starts_with) print 'Hello', cat_list return render(request, 'rango/index.html', {'cat_list': cat_list })
[ "nikolay.borovenskiy@gmail.com" ]
nikolay.borovenskiy@gmail.com
b540488509bc29117e14bf470b82eaab0f677a68
4c3ffe142d476c81d6886fa568646485996bddfe
/sms/texts/filters.py
76fcb316cafd4da793b92bff35eac139a4c71fb0
[]
no_license
mosesju/sms_django
5c62e8fa83451ca7689289acb13acc8fddc9d5c4
64825c7b05c5a74242141a0479acd379ca1655e8
refs/heads/master
2022-07-26T10:33:10.502713
2020-05-16T08:36:26
2020-05-16T08:36:26
261,977,597
0
0
null
null
null
null
UTF-8
Python
false
false
156
py
import django_filters from .models import Text class TextFilter(django_filters.FilterSet): class Meta: model = Text fields = '__all__'
[ "e.julianmoses6@gmail.com" ]
e.julianmoses6@gmail.com
1bf2158bc437ca181fbc66a1c3e55214a6f792ff
7ed9b1d87012cd11ecc1625cadbea861223e82c5
/plugins/devices/FakeCamera.py
cfad8329be69fc9640b4f45f25b6f0e84cc7df71
[]
no_license
dsblank/pyrobot
577bdcb1cd68777b76aaada11ff3d3c3c5231c38
d9c19947767a97980ec31d2096ec157bafa55f0d
refs/heads/master
2021-01-21T21:19:48.788998
2019-04-01T03:55:06
2019-04-01T03:55:06
94,819,207
2
2
null
2019-03-30T17:03:32
2017-06-19T20:43:18
Python
UTF-8
Python
false
false
174
py
from pyrobot.camera.fake import FakeCamera from pyrobot.vision.cvision import VisionSystem def INIT(robot): return {"camera": FakeCamera(visionSystem = VisionSystem())}
[ "doug.blank@gmail.com" ]
doug.blank@gmail.com
20820d2999c8256213b38a1a0ee968ca846d22ce
5242b58f4cfe6cb82834fa859a6a81e88033cc08
/utils/login_required.py
52a8ffbe3fd82e3b2c27641048af9dea82e8303b
[]
no_license
CollinChiang/ChatApp
06a8128e986141e49221d6bdc270bc62475ef46e
bd1b25c53fde390ba4695e49e98e1d64ce1637e2
refs/heads/main
2023-07-07T16:59:38.681747
2021-09-08T02:59:31
2021-09-08T02:59:31
402,828,633
0
0
null
null
null
null
UTF-8
Python
false
false
302
py
from flask import redirect, session from functools import wraps def login_required(func): @wraps(func) def decorated_function(*args, **kwargs): if session.get("user_id") is None: return redirect("/login") return func(*args, **kwargs) return decorated_function
[ "46460223+CollinChiang@users.noreply.github.com" ]
46460223+CollinChiang@users.noreply.github.com
474ab03160c08cfcd303d16a89320616f916a642
099ff999f3d80e8b6e47b85bda30b2499c442bfe
/gcp_hpo/smart_search.py
b3d85010e21db971af388784f4ceb38cc430242a
[ "MIT" ]
permissive
lucasiscovici/DeepMining
7c0e32e83f03c83be7863316f669a0df1774b00a
db0a30a2cea7a525355f5d6246ccd07d4dfe4ce5
refs/heads/master
2020-04-30T08:32:44.756945
2018-07-03T18:25:05
2018-07-03T18:25:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
17,979
py
""" Hyper-parameter optimization through randomized search, GP-based, and GCP-based. """ # Author: Sebastien Dubois # for ALFA Group, CSAIL, MIT # The MIT License (MIT) # Copyright (c) 2015 Sebastien Dubois # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import numpy as np from random import randint, randrange import search_utils as utils from gcp_hpo.gcp.gcp import GaussianCopulaProcess from sklearn.gaussian_process import GaussianProcess from sklearn.cross_validation import check_cv, _fit_and_score from sklearn.metrics.scorer import check_scoring from sklearn.base import is_classifier, clone class SmartSearch(object): """The class for GCP-based hyper-parameter optimization. Also handling randomized and GP-based search. 
**Parameters** ---------- `parameters` : dict, parameter space on which to optimize the estimator The keys of the dictionnary should be the names of the parameters, and the values should be lists of length 2; the first element being the type of the parameter ('int', 'float' or 'cat' [for categorical]), and the second element being a list of either the bounds between which to search (for 'int' and 'float') or the values the parameter can take (for 'cat') Example : parameters = {'kernel' : ['cat', ['rbf','poly']], 'd' : ['int', [1,3]], 'C' : ['float',[1,10])} `estimator` : 1) sklearn estimator or 2) callable 1 : object type that implements the "fit" and "predict" methods, as a classifier or a pipeline 2 : a function that computes the output given a dictionnary of parameters. The returned value should be a list of one or more floats if score_format == 'cv', and a float if score_format == 'avg'. `X` : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. `y` : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. `model` : string, optional The model to run. Choose between : - GCP (non-parametric (Latent) Gaussian Copula Process) - GP (Gaussian Process) - rand (samples at random) `score_format` : string ('cv' or 'avg'), optional 'avg' considers only the mean of the CV results while 'cv' stores all values Default is 'cv' `fit_params` : dict, optional Parameters to pass to the fit method. `scoring` : string, callable or None, optional A string (see sklearn's model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. Default is None. `cv` : integer or cross-validation generator, optional Relevant if the estimator is an sklearn object. If an integer is passed, it is the number of folds. 
Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects Default is 5. `acquisition function` : string, optional Function to maximize in order to choose the next parameter to test. - Simple : maximize the predicted output - UCB : maximize the upper confidence bound - EI : maximizes the expected improvement Default is 'UCB' `corr_kernel` : string, optional Correlation kernel to choose for the GCP. Possible choices are : - exponential_periodic (a linear combination of 3 classic kernels) - squared_exponential Default is 'exponential_periodic'. `n_iter` : int Total number of iterations to perform (including n_init and n_final_iter). Default is 100. `n_init` : int, optional Number of random iterations to perform before the smart sampling. Default is 30. `n_final_iter` : int, optional Number of final iterations, ie. smart iterations but with acquisition_function == 'Simple' Default is 5. `n_candidates` : int, optional Number of random candidates to sample for each GCP / GP iterations Default is 500. `n_clusters` : int, optional Number of clusters used in the parameter space to build a variable mapping for the GCP. Default is 1. `cluster_evol` : string {'constant', 'step', 'variable'}, optional Method used to set the number of clusters. If 'constant', the number of clusters is set with n_clusters. If 'step', start with one cluster, and set n_clusters after 20 smart steps. If 'variable', start with one cluster and increase n_clusters by one every 30 smart steps. Default is constant. `n_clusters_max` : int, optional The maximum value for n_clusters (relevant only if cluster_evol <> 'constant'). Default is 5. `nugget` : float, optional The nugget to set for the Gaussian Copula Process or Gaussian Process. Default is 1.e-10. 
`GCP_mapWithNoise` : boolean, optional If True and if Y outputs contain multiple noisy observations for the same x inputs, then all the noisy observations are used to compute Y's distribution and learn the mapping function. Otherwise, only the mean of the outputs, for a given input x, is considered. Default is False. `GCP_useAllNoisyY` : boolean, optional If True and if Y outputs contain multiple noisy observations for the same x inputs, then all the warped noisy observations are used to fit the GP. Otherwise, only the mean of the outputs, for a given input x, is considered. Default is False. `model_noise` : string {'EGN',None}, optional Method to model the noise. If not None and if Y outputs contain multiple noisy observations for the same x inputs, then the nugget is estimated from the standard deviation of the multiple outputs for a given input x. Default is None. `detailed_res` : int, optional Specify the level of details to return. 0 : tested parameters and mean outputs 1 : tested parameters and list of CV results 2 : tested parameters, search path, list of all CV results and mean outputs Default is 1. **Attributes** ---------- `best_parameter_` : dict, the parameter set, from those tested by the method _fit, that maximizes the mean of the cross-validation results. 
`tested_parameters_` : ndarray, the parameters tested by _fit `cv_scores_` : if score_format == 'cv', list of all the CV results of the parameters tested by _fit; if score_format == 'avg', array of the mean CV results of the parameters tested by _fit **Examples** -------- >>> from sklearn.datasets import load_digits >>> iris = load_digits() >>> X, y = iris.data, iris.target >>> clf = RandomForestClassifier(n_estimators=20) >>> parameters = { "max_depth": ['int',[3, 3]], "max_features": ['int',[1,11]], "min_samples_split": ['int',[1,11]], "min_samples_leaf": ['int',[1,11]], "bootstrap": ['cat',[True, False]], "criterion": ['cat',["gini", "entropy"]]} >>> search = SmartSearch(parameters,estimator=clf,X=X,y=y,n_iter=20) >>> search._fit() """ def __init__(self, parameters, estimator, X=None, y=None, model='GCP', score_format = 'cv', fit_params=None, scoring=None, cv=5, acquisition_function = 'UCB', corr_kernel= 'squared_exponential', n_clusters=1, n_clusters_max=5, cluster_evol = 'constant', GCP_mapWithNoise=False, GCP_useAllNoisyY=False, model_noise=None, n_iter=100, n_init=10, n_final_iter = 5, n_candidates = 500, nugget=1.e-10, detailed_res=1, verbose=1): self.parameters = parameters self.n_parameters = len(parameters) self.n_iter = n_iter self.n_init = n_init self.n_final_iter = n_final_iter self.n_candidates = n_candidates self.param_names = sorted(parameters.keys()) self.param_isInt = np.array([ 0 if (parameters[k][0]=='float') else 1 for k in self.param_names ]) self.param_bounds = np.zeros((self.n_parameters,2)) self.verbose = verbose self.scoring = scoring self.estimator = estimator self.fit_params = fit_params if fit_params is not None else {} self.cv = cv self.X = X self.y = y self.model = model self.score_format = score_format # 'cv' or 'avg' self.acquisition_function = acquisition_function self.corr_kernel = corr_kernel self.n_clusters = n_clusters self.n_clusters_max = n_clusters_max self.cluster_evol = cluster_evol self.GCP_mapWithNoise = 
GCP_mapWithNoise self.GCP_useAllNoisyY = GCP_useAllNoisyY self.model_noise = model_noise self.GCP_upperBound_coef = 1.96 self.nugget = nugget self.detailed_res = detailed_res self.best_parameter_ = None self.tested_parameters_ = None self.cv_scores_ = None if(cluster_evol != 'constant'): self.GCP_args = [corr_kernel, 1,GCP_mapWithNoise,GCP_useAllNoisyY,model_noise,nugget,self.GCP_upperBound_coef] else: self.GCP_args = [corr_kernel, n_clusters,GCP_mapWithNoise,GCP_useAllNoisyY,model_noise,nugget,self.GCP_upperBound_coef] if(callable(estimator)): self._callable_estimator = True if(verbose): print('Estimator is a callable and not an sklearn Estimator') else: self._callable_estimator = False if not self._callable_estimator: self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) # init param_bounds for i in range(self.n_parameters): if(parameters[self.param_names[i]][0]=='cat'): self.param_bounds[i,0] = 0 self.param_bounds[i,1] = len(parameters[self.param_names[i]][1]) else: self.param_bounds[i] = np.array(parameters[self.param_names[i]][1]) if(parameters[self.param_names[i]][0]=='int'): self.param_bounds[i,1] += 1 if(self.verbose): print(self.parameters) print(self.param_names) print(self.param_isInt) print(self.param_bounds) def vector_to_dict(self,vector_parameter): dict_parameter = dict.fromkeys(self.param_names) for i in range(self.n_parameters): if(self.parameters[self.param_names[i]][0]=='cat'): dict_parameter[self.param_names[i]] = (self.parameters[self.param_names[i]][1])[int(vector_parameter[i])] elif(self.parameters[self.param_names[i]][0]=='int'): dict_parameter[self.param_names[i]] = int(vector_parameter[i]) else: dict_parameter[self.param_names[i]] = vector_parameter[i] return dict_parameter def score(self,test_parameter): """ The score function to call in order to evaluate the quality of the parameter test_parameter Parameters ---------- `tested_parameter` : dict, the parameter to test Returns ------- `score` : the CV score, either the list of 
all cv results or the mean (depending of score_format) """ if not self._callable_estimator: cv = check_cv(self.cv, self.X, self.y, classifier=is_classifier(self.estimator)) cv_score = [ _fit_and_score(clone(self.estimator), self.X, self.y, self.scorer_, train, test, False, test_parameter, self.fit_params, return_parameters=True) for train, test in cv ] n_test_samples = 0 mean_score = 0 detailed_score = [] for tmp_score, tmp_n_test_samples, _, _ in cv_score: detailed_score.append(tmp_score) tmp_score *= tmp_n_test_samples n_test_samples += tmp_n_test_samples mean_score += tmp_score mean_score /= float(n_test_samples) if(self.score_format == 'avg'): score = mean_score else: # format == 'cv' score = detailed_score else: if(self.score_format == 'avg'): score = [self.estimator(test_parameter)] else: # format == 'cv' score = self.estimator(test_parameter) return score def _fit(self): """ Run the hyper-parameter optimization process Returns ------- `tested_parameters_` : ndarray, the parameters tested during the process `cv_scores_` : if score_format == 'cv', list of all the CV results of the parameters tested; if score_format == 'avg', array of the mean CV results of the parameters tested """ n_tested_parameters = 0 tested_parameters = np.zeros((self.n_iter,self.n_parameters)) cv_scores = [] if(self.detailed_res ==2): search_path = np.zeros((self.n_iter,self.n_parameters)) ### Initialize with random candidates ### init_candidates = utils.sample_candidates(self.n_init,self.param_bounds,self.param_isInt) self.n_init = init_candidates.shape[0] if(self.verbose): print('Start random init') for i in range(self.n_init): dict_candidate = self.vector_to_dict(init_candidates[i,:]) cv_score = self.score(dict_candidate) if(self.verbose): print ('Step ' + str(i) + ' - Hyperparameter ' + str(dict_candidate) + ' ' + str(np.mean(cv_score))) is_in,idx = utils.is_in_ndarray(init_candidates[i,:],tested_parameters[:n_tested_parameters,:]) if not is_in: 
tested_parameters[n_tested_parameters,:] = init_candidates[i,:] cv_scores.append(cv_score) n_tested_parameters += 1 else: if(self.verbose): print('Hyperparameter already tesed') cv_scores[idx] += cv_score if(self.detailed_res ==2): search_path[i,:] = init_candidates[i,:] ### Smart Search ### if(self.verbose): print('Start smart search') i_mod_10 = 0 for i in range(self.n_iter - self.n_init - self.n_final_iter): if(i==20 and self.cluster_evol=='step'): self.GCP_args[1] = n_clusters if(i/10 > (i_mod_10+2) and self.cluster_evol=='variable'): self.GCP_args[0] = self.GCP_args[0] self.GCP_args[1] = min(self.GCP_args[1]+1,self.n_clusters_max) i_mod_10 += 3 # Sample candidates and predict their corresponding acquisition values candidates = utils.sample_candidates(self.n_candidates,self.param_bounds,self.param_isInt) # Model and retrieve the candidate that maximezes the acquisiton function best_candidate = utils.find_best_candidate(self.model, tested_parameters[:n_tested_parameters,:], cv_scores, self.GCP_args, candidates, self.verbose, self.acquisition_function) dict_candidate = self.vector_to_dict(best_candidate) cv_score = self.score(dict_candidate) if(self.verbose): print ('Step ' + str(i+self.n_init) + ' - Hyperparameter ' + str(dict_candidate) + ' ' + str(np.mean(cv_score))) is_in,idx = utils.is_in_ndarray(best_candidate,tested_parameters[:n_tested_parameters,:]) if not is_in: tested_parameters[n_tested_parameters,:] = best_candidate cv_scores.append(cv_score) n_tested_parameters += 1 else: if(self.verbose): print('Hyperparameter already tesed') cv_scores[idx] += cv_score if(self.detailed_res ==2): search_path[i + self.n_init,:] = best_candidate ### Final steps ### self.acquisition_function = 'Simple' for i in range(self.n_final_iter): # Sample candidates and predict their corresponding acquisition values candidates = utils.sample_candidates(self.n_candidates,self.param_bounds,self.param_isInt) # Model and retrieve the candidate that maximezes the acquisiton function 
best_candidate = utils.find_best_candidate(self.model, tested_parameters[:n_tested_parameters,:], cv_scores, self.GCP_args, candidates, self.verbose, self.acquisition_function) dict_candidate = self.vector_to_dict(best_candidate) cv_score = self.score(dict_candidate) if(self.verbose): print ('Step ' + str(i+self.n_iter - self.n_final_iter) + ' - Hyperparameter ' + str(dict_candidate) + ' ' + str(np.mean(cv_score))) is_in,idx = utils.is_in_ndarray(best_candidate,tested_parameters[:n_tested_parameters,:]) if not is_in: tested_parameters[n_tested_parameters,:] = best_candidate cv_scores.append(cv_score) n_tested_parameters += 1 else: if(self.verbose): print('Hyperparameter already tesed') cv_scores[idx] += cv_score if(self.detailed_res ==2): search_path[i + self.n_iter - self.n_final_iter,:] = best_candidate # compute the averages of CV results mean_scores = [] for o in cv_scores: mean_scores.append(np.mean(o)) # find the max best_idx = np.argmax(mean_scores) vector_best_param = tested_parameters[best_idx] best_parameter = self.vector_to_dict(vector_best_param) # store self.best_parameter_ = best_parameter self.tested_parameters_ = tested_parameters[:n_tested_parameters,:] if(self.verbose): print ('\nTested ' + str(n_tested_parameters) + ' parameters') print ('Max cv score ' + str(mean_scores[best_idx])) print ('Best parameter ' + str(tested_parameters[best_idx])) print(best_parameter) if(self.detailed_res == 1): self.cv_scores_ = list(cv_scores) return tested_parameters[:n_tested_parameters,:], cv_scores elif(self.detailed_res == 2): self.cv_scores_ = list(cv_scores) return tested_parameters[:n_tested_parameters,:], search_path, cv_scores, mean_scores else: self.cv_scores_ = mean_scores return tested_parameters[:n_tested_parameters,:], mean_scores
[ "sdubois.sebastien@gmail.com" ]
sdubois.sebastien@gmail.com
14878d495a08c0d015f2c65e2bc704123e4d4d3d
f5049260eb609e77be212b0d77dc32f283a2a498
/Hobbies/Hobbies/wsgi.py
1392c7d4c178e46636e80bcb034eb978499490d5
[]
no_license
MrKreg/HobbiesProject
35a6726c1be5d8044a1c170df1d43d5f631ffcdf
d13eeca5deec2d24500ac0a014efa77531f66b54
refs/heads/master
2020-04-13T15:50:04.938191
2018-12-27T22:51:25
2018-12-27T22:51:25
163,301,361
0
0
null
null
null
null
UTF-8
Python
false
false
530
py
# +++++++++++ DJANGO +++++++++++ # To use your own Django app use code like this: import os import sys # assuming your Django settings file is at '/home/myusername/mysite/mysite/settings.py' path = '/home/MrKreg/HobbiesProject/Hobbies' if path not in sys.path: sys.path.insert(0, path) os.environ['DJANGO_SETTINGS_MODULE'] = 'Hobbies.settings' ## Uncomment the lines below depending on your Django version ###### then, for Django >=1.5: from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
[ "taras.sheketa@gmail.com" ]
taras.sheketa@gmail.com
fc155d1aea9ba0987c0503f3f248de84efbb9d67
359f7a903c35baafa372fd6f245a7390933d5c6b
/phygeograph/phygeograph/model/kddwei.py
3fbd27d185978d7f9e05fc0f35a786a92f88559f
[ "MIT", "LicenseRef-scancode-other-permissive" ]
permissive
phygeograph/phygeograph
dd7c870ab32f941b372bfec4d532406389b8ae6f
a96ecbae25cd3c785685a840a079aafb30b4a1d4
refs/heads/master
2023-08-02T08:23:56.914452
2021-09-27T01:54:57
2021-09-27T01:54:57
409,607,477
1
0
null
null
null
null
UTF-8
Python
false
false
4,185
py
import torch import scipy.spatial def knngeo(x, y, k, batch_x=None, batch_y=None): r"""Finds for each element in :obj:`y` the :obj:`k` nearest points in :obj:`x`. Args: x (Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. y (Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{M \times F}`. k (int): The number of neighbors. batch_x (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. (default: :obj:`None`) batch_y (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each node to a specific example. (default: :obj:`None`) :rtype: :class:`LongTensor` .. testsetup:: import torch from geographnet.model.kddwei import knngeo .. testcode:: """ if batch_x is None: batch_x = x.new_zeros(x.size(0), dtype=torch.long) if batch_y is None: batch_y = y.new_zeros(y.size(0), dtype=torch.long) x = x.view(-1, 1) if x.dim() == 1 else x y = y.view(-1, 1) if y.dim() == 1 else y assert x.dim() == 2 and batch_x.dim() == 1 assert y.dim() == 2 and batch_y.dim() == 1 assert x.size(1) == y.size(1) assert x.size(0) == batch_x.size(0) assert y.size(0) == batch_y.size(0) # if x.is_cuda: # return torch_cluster.knn_cuda.knn(x, y, k, batch_x, batch_y) # Rescale x and y. min_xy = min(x.min().item(), y.min().item()) x, y = x - min_xy, y - min_xy max_xy = max(x.max().item(), y.max().item()) x, y, = x / max_xy, y / max_xy # Concat batch/features to ensure no cross-links between examples exist. 
x = torch.cat([x, 2 * x.size(1) * batch_x.view(-1, 1).to(x.dtype)], dim=-1) y = torch.cat([y, 2 * y.size(1) * batch_y.view(-1, 1).to(y.dtype)], dim=-1) #tree = scipy.spatial.cKDTree(x.detach().numpy()) tree = scipy.spatial.cKDTree(x.detach().cpu().numpy()) # dist, col = tree.query(y.detach().cpu(), k=k, distance_upper_bound=x.size(1)) dist, col = tree.query(y.detach().cpu(), k=k, distance_upper_bound=x.size(1)) dist = torch.from_numpy(dist).to(x.dtype) col = torch.from_numpy(col).to(torch.long) row = torch.arange(col.size(0), dtype=torch.long).view(-1, 1).repeat(1, k) # mask = 1 - torch.isinf(dist).view(-1) mask = ~ torch.isinf(dist).view(-1) row, col = row.view(-1)[mask], col.view(-1)[mask] dist=dist.view(-1)[mask] return torch.stack([row, col, dist], dim=0) def knnd_geograph(x, k, batch=None, loop=False, flow='source_to_target'): r"""Computes graph edges to the nearest :obj:`k` points. Args: x (Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. k (int): The number of neighbors. batch (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. (default: :obj:`None`) loop (bool, optional): If :obj:`True`, the graph will contain self-loops. (default: :obj:`False`) flow (string, optional): The flow direction when using in combination with message passing (:obj:`"source_to_target"` or :obj:`"target_to_source"`). (default: :obj:`"source_to_target"`) :rtype: :class:`LongTensor` .. testsetup:: import torch from geographnet.model.kddwei import knnd_geograph .. 
testcode:: >>> x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) >>> batch = torch.tensor([0, 0, 0, 0]) >>> edge_index = knnd_geograph(x, k=2, batch=batch, loop=False) """ assert flow in ['source_to_target', 'target_to_source'] row, col, dist = knngeo(x, x, k if loop else k + 1, batch, batch) row, col = (col, row) if flow == 'source_to_target' else (row, col) if not loop: mask = row != col row, col = row[mask], col[mask] dist = dist[mask] row=row.long() col=col.long() return (torch.stack([row, col], dim=0),dist)
[ "phygeograph@gmail.com" ]
phygeograph@gmail.com
8a8b8ba79006a28316ef9aa505f79d5b02b4b33a
2481cde6506743565dff2b405a2396daf208ab3e
/src/ranking/management/modules/algorithm_yandex.py
704ac764e959da19a6154d23d666eb6a1279c8cb
[ "Apache-2.0" ]
permissive
aropan/clist
4819a3036d179595e4df8c646aff2ed593b9dad3
5c805b2af71acee97f993f19d8d4e229f7f5b411
refs/heads/master
2023-08-31T11:15:17.987776
2023-08-27T21:51:14
2023-08-27T21:52:16
187,111,853
276
35
Apache-2.0
2023-09-06T18:42:53
2019-05-16T22:57:03
Python
UTF-8
Python
false
false
586
py
# -*- coding: utf-8 -*- import re from ranking.management.modules import yandex class Statistic(yandex.Statistic): def get_standings(self, *args, **kwargs): standings = super().get_standings(*args, **kwargs) if re.search(r'\bfinals?\b', self.name, re.I): if 'medals' not in standings.get('options', {}) and 'medals' not in self.info.get('standings', {}): options = standings.setdefault('options', {}) options['medals'] = [{'name': name, 'count': 1} for name in ('gold', 'silver', 'bronze')] return standings
[ "nap0rbl4@gmail.com" ]
nap0rbl4@gmail.com
2d80f7c9fbbe827f590fe956913c72d4b5e11451
0ce587a8932592fd989e0be9bf3ee65469875078
/quantdsl/syntax.py
6b91619504be69797bc17073add5820c3e8cf245
[ "BSD-3-Clause" ]
permissive
caiorss/quantdsl
98bdb73426a874e49ee71b7f030b528c4d479e02
eaf72f5656628530e51a3ef4d0e83a903c55b86b
refs/heads/master
2021-01-17T21:26:14.519914
2014-09-17T21:25:38
2014-09-17T21:25:38
null
0
0
null
null
null
null
UTF-8
Python
false
false
9,662
py
import ast from quantdsl.exceptions import DslSyntaxError class DslParser(object): def parse(self, dslSource, filename='<unknown>', dslClasses=None): """ Creates a DSL Module object from a DSL source text. """ self.dslClasses = {} if dslClasses: assert isinstance(dslClasses, dict) self.dslClasses.update(dslClasses) if not isinstance(dslSource, basestring): raise DslSyntaxError("Can't parse non-string object", dslSource) assert isinstance(dslSource, basestring) try: # Parse as Python source code, into a Python abstract syntax tree. astModule = ast.parse(dslSource, filename=filename, mode='exec') except SyntaxError, e: raise DslSyntaxError("DSL source code is not valid Python code", e) # Generate Quant DSL from Python AST. return self.visitAstNode(astModule) def visitAstNode(self, node): """ Identifies which "visit" method to call, according to type of node being visited. Returns the result of calling the identified "visit" method. """ assert isinstance(node, ast.AST) # Construct the "visit" method name. dslElementName = node.__class__.__name__ methodName = 'visit' + dslElementName # Try to get the "visit" method object. try: method = getattr(self, methodName) except AttributeError: msg = "element '%s' is not supported (visit method '%s' not found on parser): %s" % ( dslElementName, methodName, node) raise DslSyntaxError(msg) # Call the "visit" method object, and return the result of visiting the node. return method(node=node) def visitReturn(self, node): """ Visitor method for ast.Return nodes. Returns the result of visiting the expression held by the return statement. """ assert isinstance(node, ast.Return) return self.visitAstNode(node.value) def visitModule(self, node): """ Visitor method for ast.Module nodes. Returns a DSL Module, with a list of DSL expressions as the body. 
""" assert isinstance(node, ast.Module) body = [self.visitAstNode(n) for n in node.body] return self.dslClasses['Module'](body, node=node) def visitExpr(self, node): """ Visitor method for ast.Expr nodes. Returns the result of visiting the contents of the expression node. """ assert isinstance(node, ast.Expr) if isinstance(node.value, ast.AST): return self.visitAstNode(node.value) else: raise DslSyntaxError def visitNum(self, node): """ Visitor method for ast.Name. Returns a DSL Number object, with the number value. """ assert isinstance(node, ast.Num) return self.dslClasses['Number'](node.n, node=node) def visitStr(self, node): """ Visitor method for ast.Str. Returns a DSL String object, with the string value. """ assert isinstance(node, ast.Str) return self.dslClasses['String'](node.s, node=node) def visitUnaryOp(self, node): """ Visitor method for ast.UnaryOp. Returns a specific DSL UnaryOp object (e.g UnarySub), along with the operand. """ assert isinstance(node, ast.UnaryOp) args = [self.visitAstNode(node.operand)] if isinstance(node.op, ast.USub): dslUnaryOpClass = self.dslClasses['UnarySub'] else: raise DslSyntaxError("Unsupported unary operator token: %s" % node.op) return dslUnaryOpClass(node=node, *args) def visitBinOp(self, node): """ Visitor method for ast.BinOp. Returns a specific DSL BinOp object (e.g Add), along with the left and right operands. """ assert isinstance(node, ast.BinOp) typeMap = { ast.Add: self.dslClasses['Add'], ast.Sub: self.dslClasses['Sub'], ast.Mult: self.dslClasses['Mult'], ast.Div: self.dslClasses['Div'], ast.Pow: self.dslClasses['Pow'], ast.Mod: self.dslClasses['Mod'], ast.FloorDiv: self.dslClasses['FloorDiv'], } try: dslClass = typeMap[type(node.op)] except KeyError: raise DslSyntaxError("Unsupported binary operator token", node.op, node=node) args = [self.visitAstNode(node.left), self.visitAstNode(node.right)] return dslClass(node=node, *args) def visitBoolOp(self, node): """ Visitor method for ast.BoolOp. 
Returns a specific DSL BoolOp object (e.g And), along with the left and right operands. """ assert isinstance(node, ast.BoolOp) typeMap = { ast.And: self.dslClasses['And'], ast.Or: self.dslClasses['Or'], } try: dslClass = typeMap[type(node.op)] except KeyError: raise DslSyntaxError("Unsupported boolean operator token: %s" % node.op) else: values = [self.visitAstNode(v) for v in node.values] args = [values] return dslClass(node=node, *args) def visitName(self, node): """ Visitor method for ast.Name. Returns a DSL Name object, along with the name's string. """ return self.dslClasses['Name'](node.id, node=node) def visitCall(self, node): """ Visitor method for ast.Call. Returns a built-in DSL expression, or a DSL FunctionCall if the name refers to a user defined function. """ if node.keywords: raise DslSyntaxError("Calling with keywords is not currently supported (positional args only).") if node.starargs: raise DslSyntaxError("Calling with starargs is not currently supported (positional args only).") if node.kwargs: raise DslSyntaxError("Calling with kwargs is not currently supported (positional args only).") # Collect the call arg expressions (whose values will be passed into the call when it is made). callArgExprs = [self.visitAstNode(arg) for arg in node.args] # Check the called node is an ast.Name. calledNode = node.func assert isinstance(calledNode, ast.Name) calledNodeName = calledNode.id # Construct a DSL object for this call. try: # Resolve the name with a new instance of a DSL class. dslClass = self.dslClasses[calledNodeName] except KeyError: # Resolve as a FunctionCall, and expect # to resolve the name to a function def later. 
dslNameClass = self.dslClasses['Name'] dslArgs = [dslNameClass(calledNodeName, node=calledNode), callArgExprs] return self.dslClasses['FunctionCall'](node=node, *dslArgs) else: baseDslObjectClass = self.dslClasses['DslObject'] assert issubclass(dslClass, baseDslObjectClass), dslClass return dslClass(node=node, *callArgExprs) def visitFunctionDef(self, node): """ Visitor method for ast.FunctionDef. Returns a named DSL FunctionDef, with a definition of the expected call argument values. """ name = node.name dslFunctionArgClass = self.dslClasses['FunctionArg'] callArgDefs = [dslFunctionArgClass(arg.id, '') for arg in node.args.args] assert len(node.body) == 1, "Function defs with more than one body statement are not supported at the moment." decoratorNames = [astName.id for astName in node.decorator_list] body = self.visitAstNode(node.body[0]) dslArgs = [name, callArgDefs, body, decoratorNames] functionDef = self.dslClasses['FunctionDef'](node=node, *dslArgs) return functionDef def visitIfExp(self, node): """ Visitor method for ast.IfExp. Returns a named DSL IfExp, with a test DSL expression and expressions whose usage is conditional upon the test. """ test = self.visitAstNode(node.test) body = self.visitAstNode(node.body) orelse = self.visitAstNode(node.orelse) args = [test, body, orelse] return self.dslClasses['IfExp'](node=node, *args) def visitIf(self, node): """ Visitor method for ast.If. Returns a named DSL If object, with a test DSL expression and expressions whose usage is conditional upon the test. """ test = self.visitAstNode(node.test) assert len(node.body) == 1, "If statements with more than one body statement are not supported at the moment." body = self.visitAstNode(node.body[0]) assert len( node.orelse) == 1, "If statements with more than one orelse statement are not supported at the moment." 
orelse = self.visitAstNode(node.orelse[0]) args = [test, body, orelse] return self.dslClasses['If'](node=node, *args) def visitCompare(self, node): """ Visitor method for ast.Compare. Returns a named DSL Compare object, with operators (ops) and operands (comparators). """ left = self.visitAstNode(node.left) opNames = [o.__class__.__name__ for o in node.ops] comparators = [self.visitAstNode(c) for c in node.comparators] args = [left, opNames, comparators] return self.dslClasses['Compare'](node=node, *args)
[ "john.bywater@appropriatesoftware.net" ]
john.bywater@appropriatesoftware.net
01f8f0584f0144c40fc052b2a100e5205895b8a8
0a484912bc857c037144f5f2ab8b56374eddb7ec
/image_upload/settings.py
64d3fa84ef4ff0a4f2b23136e488e9356b320351
[]
no_license
jeslin01/circleci-imageupload
fbb83c7363dd18c664ced390e8bb5a5f0b1fb0ff
99183a323ca3204c7ab3d769d1133bfc143fc3f7
refs/heads/master
2022-12-12T22:43:30.167594
2019-03-30T06:39:08
2019-03-30T06:39:08
178,416,081
0
0
null
2022-12-08T04:55:16
2019-03-29T14:02:07
Python
UTF-8
Python
false
false
3,547
py
""" Django settings for image_upload project. Generated by 'django-admin startproject' using Django 2.1.7. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'fl1qpsp=4_&ue_43s87io-vgk15w225jxpc#9j+&ht!fra+!fk' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [ 'ci-imageupload.herokuapp.com', '127.0.0.1' ] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'main_app', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'image_upload.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR,'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'image_upload.wsgi.application' # Database # 
https://docs.djangoproject.com/en/2.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(os.path.abspath(os.path.dirname(__file__)), "static",), os.path.join(BASE_DIR, 'static'), ) PROJECT_DIR = os.path.dirname(os.path.abspath(__file__)) STATIC_ROOT_DEVELOPMENT = os.path.join(PROJECT_DIR, '../static') # Media files MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
[ "ashish.g@synsoftglobal.com" ]
ashish.g@synsoftglobal.com
8646a1f6a167a9781784fe5d0fd498c804a04f66
bcc4f3188c6744530d8b05c47a83288bf520346e
/config.py
d8e673f496cdbc0b36766e3b8328a24a56db291d
[]
no_license
MikeSoft007/DukkaINC_
deb31389e349b78bf4b3ee5e151686e013b6e57c
100358cc9d8ad92481b21280b2ccb33481269713
refs/heads/master
2023-06-26T12:18:24.358905
2021-08-02T22:57:47
2021-08-02T22:57:47
391,277,792
0
0
null
null
null
null
UTF-8
Python
false
false
236
py
import os basedir = os.path.abspath(os.path.dirname(__file__)) # creating a configuration class class Config(object): SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess' MONGO_URI = os.environ.get('MONGO_URI')
[ "mekpenyong2@gmail.com" ]
mekpenyong2@gmail.com
8a97071f6de0931196876b2e68bc90a3e6b3f411
6acdc11c60e874e85adba173abb90f279049ab89
/coding.py
38bacad180c9b8b5d530c8e109dc4a1c43653658
[]
no_license
bala977/balaji
3ce9123709b1be1a24b8fd2235405e9a9be68382
ba73e7591e7c86daef86081a15a19760dcac16b8
refs/heads/master
2020-06-11T16:58:56.256340
2019-08-05T10:03:27
2019-08-05T10:03:27
194,030,685
0
4
null
null
null
null
UTF-8
Python
false
false
93
py
#B N1,P2=input().split() if (int(N1)-int(P2))%2==0: print("even") else: print("odd")
[ "noreply@github.com" ]
noreply@github.com
b5097dc639ce1b85de30e5898a505721e3bb28f1
a2e638cd0c124254e67963bda62c21351881ee75
/Extensions/Deal Capture Examples/FPythonCode/MiniFutureInsDef.py
f25b228e0539ca28ceaaee4e44dc3bd4a628ca06
[]
no_license
webclinic017/fa-absa-py3
1ffa98f2bd72d541166fdaac421d3c84147a4e01
5e7cc7de3495145501ca53deb9efee2233ab7e1c
refs/heads/main
2023-04-19T10:41:21.273030
2021-05-10T08:50:05
2021-05-10T08:50:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,795
py
from __future__ import print_function import acm, ael def SetUpMiniFuture(definitionSetUp): from DealCaptureSetup import AddInfoSetUp, CustomMethodSetUp definitionSetUp.AddSetupItems( AddInfoSetUp( recordType='Instrument', fieldName='MiniFuture', dataType='Boolean', description='CustomInsdef', dataTypeGroup='Standard', subTypes=['Warrant'], defaultValue='', mandatory=False), AddInfoSetUp( recordType='Instrument', fieldName='RateMargin', dataType='Double', description='CustomInsdef', dataTypeGroup='Standard', subTypes=['Warrant'], defaultValue='', mandatory=False) ) definitionSetUp.AddSetupItems( CustomMethodSetUp( className='FWarrant', customMethodName='GetMiniFuture', methodName='MiniFuture'), CustomMethodSetUp( className='FWarrant', customMethodName='GetMiniFutureFinancingLevel', methodName='MiniFutureFinancingLevel'), CustomMethodSetUp( className='FWarrant', customMethodName='GetMiniFutureFinancingSpread', methodName='MiniFutureFinancingSpread'), CustomMethodSetUp( className='FWarrant', customMethodName='GetMiniFutureInterestRateMargin', methodName='MiniFutureInterestRateMargin'), CustomMethodSetUp( className='FWarrant', customMethodName='GetMiniFutureStopLoss', methodName='MiniFutureStopLoss'), CustomMethodSetUp( className='FWarrant', customMethodName='GetMiniFutureUnderlyingType', methodName='MiniFutureUnderlyingType'), CustomMethodSetUp( className='FWarrant', customMethodName='GetMiniFutureUnderlyingType', methodName='MiniFutureUnderlyingType'), CustomMethodSetUp( className='FWarrant', customMethodName='SetMiniFuture', methodName='SetMiniFuture'), CustomMethodSetUp( className='FWarrant', customMethodName='SetMiniFutureFinancingLevel', methodName='MiniFutureFinancingLevel'), CustomMethodSetUp( className='FWarrant', customMethodName='SetMiniFutureInterestRateMargin', methodName='MiniFutureInterestRateMargin'), CustomMethodSetUp( className='FWarrant', customMethodName='SetMiniFutureStopLoss', methodName='MiniFutureStopLoss'), CustomMethodSetUp( 
className='FWarrant', customMethodName='SetMiniFutureUnderlyingType', methodName='MiniFutureUnderlyingType') ) def SetUnderlyingType(instrument, underlyingType): instrument.UnderlyingType(underlyingType) return def GetUnderlyingType(instrument): return instrument.UnderlyingType() def GetMiniFuture(instrument): isMiniFuture = None try: isMiniFuture = instrument.AdditionalInfo().MiniFuture() except Exception as e: print ("Additional Info field missing. Please create an Additional Info field on Instrument (Warrant) of type boolean called MiniFuture and restart system.") return isMiniFuture def GetFinancingSpread(instrument): if instrument.StrikePrice(): premium=instrument.Barrier()-instrument.StrikePrice() premiumPercent=premium/instrument.StrikePrice()*100 return premiumPercent else: return 0 def SetStopLoss(instrument, stopLoss): instrument.Barrier(stopLoss) if instrument.StrikePrice(): premium=instrument.Barrier()-instrument.StrikePrice() if premium < 0: instrument.SuggestOptionType(False) else: instrument.SuggestOptionType(True) return def GetStopLoss(instrument): return instrument.Barrier() def SetFinancingLevel(instrument, financingLevel): instrument.StrikePrice(financingLevel) if instrument.StrikePrice(): premium=instrument.Barrier()-instrument.StrikePrice() if premium < 0: instrument.SuggestOptionType(False) else: instrument.SuggestOptionType(True) return def GetFinancingLevel(instrument): return instrument.StrikePrice() def SetMiniFuture(instrument, miniFuture): try: instrument.AdditionalInfo().MiniFuture(miniFuture) except: print ("Additional Info field missing. Please create an Additional Info field on Instrument (Warrant) of type boolean called MiniFuture and restart system.") return def SetRateMargin(instrument, rateMargin): try: instrument.AdditionalInfo().RateMargin(rateMargin) except: print ("Additional Info field missing. 
Please create an Additional Info field on Instrument (Warrant) of type double called RateMargin and restart system.") def GetRateMargin(instrument): try: if instrument.AdditionalInfo().RateMargin(): return instrument.AdditionalInfo().RateMargin() else: return 0.0 except: print ("Additional Info field missing. Please create an Additional Info field on Instrument (Warrant) of type double called RateMargin and restart system.") def UpdateDefaultInstrument(ins): # Not possible to set AddInfo fields on default instrument. Set Mini Future field to true. try: ins.AdditionalInfo().MiniFuture(True) except: print ("Additional Info field missing. Please create an Additional Info field on Instrument (Warrant) of type boolean called MiniFuture and restart system.") if not ins.Exotic(): # This code will set up the Barrier if no default barrier instrument exists ins.ExoticType('Other') e=acm.FExotic() ins.Exotics().Add(e) e.RegisterInStorage() e.BarrierOptionType("Up & In")
[ "nencho.georogiev@absa.africa" ]
nencho.georogiev@absa.africa
a9d9e1b1168497075525714a3c133139ddf2971d
f3e01cec9dafab76911743281b9078d71a3f1201
/python/example/client_threading_demo.py
f123e755238ccc556cb00f1dff62de7f6992a4fd
[ "Apache-2.0" ]
permissive
yinzhiyan43/harpc
ec31c765e6e31cd4cbd86dbd7583e8244b49667c
0ca4e4e72bdea0d539852438b2e782cc9d87b83b
refs/heads/master
2020-12-31T02:49:09.794181
2015-10-27T07:59:47
2015-10-27T07:59:47
47,240,224
2
1
null
2015-12-02T05:55:33
2015-12-02T05:55:33
null
UTF-8
Python
false
false
1,719
py
# -*- coding: utf-8 -*- import threading import logging import time from tutorial import TutorialService from bfd.harpc import client from bfd.harpc.common import config threads = 15 req_num = 10 data = [] error = 0 for i in range(0,10240): data.append(chr(i%64 + 32)) test_msg= ''.join(data) logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', filename='./logs/clientdemo.log', filemode='w') def process(proxy_client, process_num): global error for i in range(0,req_num): try: proxy_client.echo(test_msg) except Exception as e: error = error + 1 print "request error %s" %e print("process_num:%s end" % process_num) if __name__ == '__main__': # read config file #conf = config.Config("./etc/demo_client.conf") # setting config use zk conf = config.Config() conf.set("client", "service", "python_test$EchoService") conf.set("client", "zk_connect_str", "172.18.1.22:2181") manager = client.Client(TutorialService.Client, conf) proxy_client = manager.create_proxy() jobs = [] # 创建多线程,多线程公用一个proxy_client start = time.time() for i in range(0, threads): td = threading.Thread(target=process, args=(proxy_client, i)) td.start() jobs.append(td) for job in jobs: job.join() end = time.time() req_time = end-start total = req_num*threads print "total : %s" % total print "total time: %s" % req_time print "error num : %s" % error print "tps : %s" % (total/req_time) manager.close()
[ "dongsheng.fan@baifendian.com" ]
dongsheng.fan@baifendian.com
aee96f6de4e6fd2ecd47ec453c188c2895fc41c9
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/173/usersdata/268/81652/submittedfiles/moedas.py
b3b9d67c5c88d783592b6b36e092be5012cfea8b
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
380
py
# -*- coding: utf-8 -*- a=int(input('Digite o valor de a: ')) b=int(input('Digite o valor de b: ')) c=int(input('Digite o valor da cédula: ')) w=0 x9=0 while(w<c): duvida= a*w comprovacao= (n-a*w) if (comprovacao%b)==0: print(duvida) print(b*(comprovacao/b) if ((comprovacao%b)!=0) : x9=x9 +1 w=w+1 if(x9==n): print('N')
[ "rafael.mota@ufca.edu.br" ]
rafael.mota@ufca.edu.br
a3836b9cc28eb0e6ec8bf9fd09847553c55d5a49
1b77f2c085e7c6c80359dc3ecee07ccbe3ffe502
/tests/__init__.py
b076850ce129f3f11bca9048433050d239b6a19e
[]
no_license
hecttormendoza/front-odoo
93c1e55ebbd09d999f5bd681ce1af627f3f81454
88350ca4867de19decaca078aac54744fc8226a3
refs/heads/master
2020-03-26T22:12:23.878965
2018-08-22T19:08:57
2018-08-22T19:08:57
145,440,717
0
0
null
null
null
null
UTF-8
Python
false
false
33
py
from . import test_tour_teachers
[ "hecttormendoza@gmail.com" ]
hecttormendoza@gmail.com
b41041bcfcd4a8b7be11c744db2facf36c93d6c6
1acaf9a72cc49c0b707695c7131c94963312807a
/TOPfilter/StringSettleV7.py
fdfcddb30096b52f136abfa7cf8240378371bdca
[]
no_license
wblyy/Music_MetaGrabber
df201fd6e46467324567c3a08f68bc491902bcda
aed99b62c159bcc9ee982825f33ffbf58c6665ea
refs/heads/master
2016-09-05T10:52:35.747895
2014-12-23T07:50:46
2014-12-23T07:50:46
26,156,496
1
3
null
null
null
null
UTF-8
Python
false
false
3,363
py
#-*-coding:utf-8-*- #V7根据甲方需求的xls从QQ的更多json中拿歌,实现批量拿取批量入库,下一阶段再整合百度已有的歌单进行去重 #可以将甲方的需求看做一个二维数组 import urllib2# 使用库: urllib2 from mydbV1 import MydbV1 dbV1 = MydbV1.instance() QQ_boundary=[ [1,179], [2,344], [4,94], [5,355], [6,86], [7,95], [9,306], [12,263], [13,170], [14,55], [17,18], [18,19] ]#根据甲方需求出的二维数组 TOP_name=["QQ音乐KTV榜","QQ音乐ChannelV榜","QQ音乐日本公信榜","QQ音乐韩国NMET榜","QQ音乐英国UK榜","QQ音乐美国公告牌-hot100榜","QQ音乐幽浮劲碟榜","QQ音乐ituns榜","QQ音乐香港商业电台榜","QQ音乐中国TOP排行榜","QQ音乐雪碧音碰音榜","QQ音乐MTV光荣榜"] for top in range(6,12):#从第1类TOP到第12类TOP #第一类榜单爬到了112号,手贱把VPN断了,不开VPN确实快很多 #第七类榜单爬到306号,需要一个抛异常机制,同时把漏网之鱼写入日志 for volume in range(1,QQ_boundary[top][1]):#从第一期到最近一期 print 'Top=' print top print 'Volum=' print volume request = urllib2.Request(url="http://y.qq.com/y/static/toplist/json/global/"+str(QQ_boundary[top][0])+"/"+str(volume)+"_1.js?&hostUin=0&format=jsonp&inCharset=GB2312&outCharset=utf-8&notice=0&platform=yqq&jsonpCallback=MusicJsonCallback&needNewCode=0") # 使用urllib2创建一个访问请求, 指定url为"http://www.baidu.com/", 并且把访问请求保存在request这个变量里面 all_the_text = urllib2.urlopen(request).read() # 使用urllib2打开request这个请求(通过urlopen()函数), 并且读取数据(使用read()函数), 把结果保存在result这个变量里面 mark = 's:' #这里的mark变了 segment='|'#分隔符 startPos=0 endPos=0 Songname=[] Artistname=[] Album=[] mPos=[0] #记录特殊符号位置的list,令其第一个值为0 index=0 while(all_the_text.find(mark,mPos[index]+1)!=-1):#读到最后的mark mPos.append(all_the_text.find(mark,mPos[index]+1))#继续找下一个,注意+1 startPos=all_the_text.find(segment,mPos[index])#起始位 endPos=all_the_text.find(segment,startPos+1)#结束位置 Songname.append(all_the_text[startPos+1:endPos]) startPos=all_the_text.find(segment,endPos+1)#下一个空挡位 endPos=all_the_text.find(segment,startPos+1)#下一个空挡位 Artistname.append(all_the_text[startPos+1:endPos]) startPos=all_the_text.find(segment,endPos+1)#下一个空挡位 endPos=all_the_text.find(segment,startPos+1)#下一个空挡位 Album.append(all_the_text[startPos+1:endPos]) index=index+1 else: print 'mPos=' print mPos print len(mPos) #str="国标舞" #print 
str.decode('utf-8').encode('gb18030')##纠结的编码print问题 #print 'Songname=' #for song in Songname: #print song.decode('utf-8').encode('gb18030') for i in range(0,len(Songname)): if dbV1.get_id(Songname[i],Artistname[i]): print "Duplicated!!!"#发现重复的不入库 else: print "inserting...."#这才入库 dbV1.insert_song(Songname[i],Artistname[i],Album[i],TOP_name[top]) #db.insert_song(Songname[i],Artistname[i],Album[i])#入库跟print不同,不需要转码
[ "wblyy0911@gmail.com" ]
wblyy0911@gmail.com
a0556965cb02a37c5455038729b9fc1ee24bf7c7
27d2a191da28364cc921b097957f9e5b46514a2b
/res_net fusion/l.py
13455f6fea3a09e34d0792c255814b1af2957a98
[]
no_license
yesichao/works
18513504150199a1eca48e861e452039da737d0f
9c47ed1a26a205b135deeaa5fa70a350823ff88b
refs/heads/master
2021-06-30T02:33:05.914134
2020-12-18T04:57:39
2020-12-18T04:57:39
204,428,171
0
0
null
null
null
null
UTF-8
Python
false
false
5,316
py
import warnings import wfdb import os import numpy as np import operator from utils import * os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2' warnings.filterwarnings("ignore") data_path='D:/python/bwl res_net/MIT-BIH' DS1=[101,106,108,109,112,114,115,116,118,119,122,124,201,203,205,207,208,209,215,220,223,230] DS2=[100,103,105,111,113,117,121,123,200,202,210,212,213,214,219,221,222,228,231,232,233,234] data = {'train': DS1, 'test': DS2 } target_class = ['train', 'test'] MITBIH_classes = ['N', 'L', 'R', 'e', 'j', 'A', 'a', 'J', 'S', 'V', 'E', 'F'] def check_overtime(i,length): s = data[target_class[i]] N_sig = [] S_sig = [] V_sig = [] F_sig = [] N_sig_sample = [] S_sig_sample = [] V_sig_sample = [] F_sig_sample = [] N_sig_seg_class = [] S_sig_seg_class = [] V_sig_seg_class = [] F_sig_seg_class = [] over_time = np.zeros(4) for k in range(len(s)): start_time = 0 end_time = start_time + length print(data_path + '/' + str(s[k]) + target_class[i]) record = wfdb.rdrecord(data_path + '/' + str(s[k]), sampfrom=0, channel_names=['MLII']) sigal = record.p_signal annotation = wfdb.rdann(data_path + '/' + str(s[k]), 'atr') while end_time <= sigal.shape[0]: sign = sigal[start_time:end_time] seg_class = [] seg_class1 = [] sample=[] for j in range(annotation.ann_len - 1): if annotation.sample[j] >= start_time and annotation.sample[j] <= end_time: sample.append(annotation.sample[j]-start_time) if annotation.symbol[j] == 'N' or annotation.symbol[j] == 'L' or annotation.symbol[j] == 'R' or \ annotation.symbol[j] == 'e' or annotation.symbol[j] == 'j': seg_class.append(0) seg_class1.append(0) elif annotation.symbol[j] == 'A' or annotation.symbol[j] == 'a' or annotation.symbol[j] == 'J' or \ annotation.symbol[j] == 'S': seg_class.append(1) seg_class1.append(1) elif annotation.symbol[j] == 'V' or annotation.symbol[j] == 'E': seg_class.append(2) seg_class1.append(2) elif annotation.symbol[j] == 'F': seg_class.append(3) seg_class1.append(3) elif annotation.symbol[j] == '/' or annotation.symbol[j] 
== 'f' or annotation.symbol[j] == 'Q': seg_class.append(4) seg_class1.append(4) if len(set(seg_class)) == 1 and seg_class[0] == 0: N_sig.append(sign) N_sig_sample.append(sample) N_sig_seg_class.append(seg_class1) elif len(set(seg_class)) != 0: while 0 in seg_class: seg_class.remove(0) if max(seg_class, key=seg_class.count) == 1: S_sig.append(sign) S_sig_sample.append(sample) S_sig_seg_class.append(seg_class1) elif max(seg_class, key=seg_class.count) == 2: V_sig.append(sign) V_sig_sample.append(sample) V_sig_seg_class.append(seg_class1) elif max(seg_class, key=seg_class.count) == 3: F_sig.append(sign) F_sig_sample.append(sample) F_sig_seg_class.append(seg_class1) start_time = end_time end_time = start_time + length num = [len(N_sig), len(S_sig), len(V_sig), len(F_sig)] print([len(N_sig_sample), len(S_sig_sample), len(V_sig_sample), len(F_sig_sample)]) print([len(N_sig_seg_class), len(S_sig_seg_class), len(V_sig_seg_class), len(F_sig_seg_class)]) print([len(N_sig), len(S_sig), len(V_sig), len(F_sig)]) N_sig = np.asarray(N_sig, dtype=np.float32) # 将训练的图像数据原来是list现在变成np.array格式 S_sig = np.asarray(S_sig, dtype=np.float32) V_sig = np.asarray(V_sig, dtype=np.float32) F_sig = np.asarray(F_sig, dtype=np.float32) np.save('D:/python/bwl res_net/row_data/N_sig.npy',N_sig) np.save('D:/python/bwl res_net/row_data/S_sig.npy', S_sig) np.save('D:/python/bwl res_net/row_data/V_sig.npy', V_sig) np.save('D:/python/bwl res_net/row_data/F_sig.npy', F_sig) np.save('D:/python/bwl res_net/row_data/N_sig_sample.npy',N_sig_sample) np.save('D:/python/bwl res_net/row_data/S_sig_sample.npy', S_sig_sample) np.save('D:/python/bwl res_net/row_data/V_sig_sample.npy', V_sig_sample) np.save('D:/python/bwl res_net/row_data/F_sig_sample.npy', F_sig_sample) np.save('D:/python/bwl res_net/row_data/N_sig_seg_class.npy',N_sig_seg_class) np.save('D:/python/bwl res_net/row_data/S_sig_seg_class.npy', S_sig_seg_class) np.save('D:/python/bwl res_net/row_data/V_sig_seg_class.npy', V_sig_seg_class) 
np.save('D:/python/bwl res_net/row_data/F_sig_seg_class.npy', F_sig_seg_class) return num check_overtime(0,1440)
[ "noreply@github.com" ]
noreply@github.com
cd7628ee316b36ef515d2a007cfa5fcc404713c9
83ca25974495f197a737b9f8c0b591a61a1fe595
/moviebooking/movie/urls.py
44ef0db5c5e8803fd20b16e931cdde89158ef3e0
[]
no_license
ShazibHK/Django_MovieBooking
0d6b044353348ee5d00a3cc274b3c670566b5e9d
64abe1e34958b100c4d94adede71a66f5d61e40a
refs/heads/main
2023-07-12T23:26:08.137343
2021-08-24T17:03:01
2021-08-24T17:03:01
399,532,784
0
0
null
null
null
null
UTF-8
Python
false
false
1,233
py
from django.contrib import admin from django.urls import path from . import views urlpatterns = [ path('', views.index, name='movieIndex'), path('', views.autocomplete, name='autocomplete'), path('home_page2.html/', views.search_content, name='search_content'), path('login/', views.customerLogin, name='customerLogin'), path('logout/', views.customerLogout, name='customerLogout'), path('signup/', views.customerSignup, name='customerSignup'), path('customer_home/', views.customerHome, name='customerHome'), path('bookings/<int:movie_id>', views.bookings, name='bookings'), path('bookingMovie/', views.bookingMovie, name='movieBookings'), path('comments/<int:movie_id>',views.comment, name='comments'), path('movieComments/', views.movieComments, name='movieComments'), path('bookingList/', views.bookingList, name='listbooking'), path('cancelBooking/<int:movie_id>', views.cancelBooking, name='cancelBooking'), path('report/', views.pie_chart, name="pie_chart"), path('report2/', views.population_chart, name='population_chart'), ] admin.site.site_header = 'Movie Booking' admin.site.site_title = 'Movie Booking' admin.site.index_title = 'Welcome to the Admin page'
[ "mca.1919@unigoa.ac.in" ]
mca.1919@unigoa.ac.in
b3589abd67feb9c0e4a4504c8763190db14f3597
c08721ea0ab07fbf866b0612d315fed770a4e260
/docs/source/conf.py
5d8789ab32ae4f9a3d934bae3cd3c78ab84299bf
[ "MIT" ]
permissive
gunnarx/pyfranca
84262c7f760eeeb60285fae401ff4f3d9b1d6f67
d986e4b97229f9673d8349b2de77c541a9377faf
refs/heads/master
2022-05-03T13:07:35.027609
2022-04-12T07:51:30
2022-04-12T21:15:10
88,442,258
3
1
MIT
2022-04-12T21:15:12
2017-04-16T21:00:37
Python
UTF-8
Python
false
false
9,369
py
# -*- coding: utf-8 -*- # # pyfranca documentation build configuration file, created by # sphinx-quickstart on Mon Mar 13 03:43:16 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os from recommonmark.parser import CommonMarkParser # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', ] # Add any paths that contain templates here, relative to this directory. #templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'PyFranca' copyright = u'2016-2017, Kaloyan Tenchov' author = u'Kaloyan Tenchov' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'0.4.1' # The full version, including alpha/beta/rc tags. 
release = u'0.4.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["**tests**"] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
#html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." 
is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'pyfrancadoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'pyfranca.tex', u'PyFranca Documentation', u'Kaloyan Tenchov', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. 
#latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pyfranca', u'PyFranca Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'pyfranca', u'PyFranca Documentation', author, 'pyfranca', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False source_parsers = { '.md': CommonMarkParser }
[ "zayfod@gmail.com" ]
zayfod@gmail.com
b506ffdba484cd5f343426bd1f96c751004ba9fa
eaeb685d13ef6c58364c5497c911f3e2f8c49a43
/Solution/922_Sort_Array_By_Parity_II.py
09d03afec572e8518220ff839a03cf91f98b8253
[]
no_license
raririn/LeetCodePractice
8b3a18e34a2e3524ec9ae8163e4be242c2ab6d64
48cf4f7d63f2ba5802c41afc2a0f75cc71b58f03
refs/heads/master
2023-01-09T06:09:02.017324
2020-09-10T02:34:46
2020-09-10T02:34:46
123,109,055
0
0
null
null
null
null
UTF-8
Python
false
false
557
py
class Solution: def sortArrayByParityII(self, A: List[int]) -> List[int]: ret = [0] * len(A) pos_e = 0 pos_o = 1 for i in A: if i % 2 == 0: ret[pos_e] = i pos_e += 2 else: ret[pos_o] = i pos_o += 2 return ret ''' Runtime: 244 ms, faster than 81.55% of Python3 online submissions for Sort Array By Parity II. Memory Usage: 16 MB, less than 8.70% of Python3 online submissions for Sort Array By Parity II. '''
[ "raririn.sandbag@gmail.com" ]
raririn.sandbag@gmail.com
1aeb85afcb4a936df8b77f6a86713552f468588e
200633e6583244f7b2c4d10464880c6e44e9003e
/Assignment3_ Ziyuan_Guan/seq_gram.py
624f4ed36b407dd6b688acae52233fa120b0f8bf
[]
no_license
Chesterguan/BigData
859a3f1f4b355cf848d53b030844e0c7c869964c
9465ef27542b4cd1842c8f2213f1532878d21701
refs/heads/master
2021-09-11T14:29:26.254631
2018-04-09T01:28:53
2018-04-09T01:28:53
125,310,923
0
0
null
null
null
null
UTF-8
Python
false
false
2,771
py
import keras from sklearn.model_selection import train_test_split from keras.models import Sequential from keras.layers import Dense, Dropout from keras.layers import Embedding from keras.layers import LSTM import numpy as np import copy import random import h5py import csv datas = [] labels = [] with open('train.csv') as f: reader = csv.reader(f) for row in reader: datas.append(row[1]) labels.append(row[2]) del datas[0] del labels[0] read=np.column_stack((datas,labels)) data_p=read[:,0] label_p=read[:,1] datas,y_train,labels,y_test=train_test_split(data_p,label_p,test_size=0.2) print(y_train.shape) # Convert letters to integers label = np.zeros((1600, 1)) label = np.reshape(labels, (1600, 1)) input = np.zeros((1600, 14, 4)) input2 = np.zeros((400, 14, 4)) def switch(letter=''): if letter == 'A': return np.array([1, 0, 0, 0]) elif letter == 'C': return np.array([0, 1, 0, 0]) elif letter == 'G': return np.array([0, 0, 1, 0]) else: return np.array([0, 0, 0, 1]) for i in range(1600): for j in range(14): vec = copy.copy(switch(datas[i][j])) input[i][j] = vec for i in range(400): for j in range(14): vec2 = copy.copy(switch(y_train[i][j])) input2[i][j] = vec max_features=14 # Initialize Network model = Sequential() ''' model.add(Conv1D(32, kernel_size=3, strides=1, activation='relu', input_shape=(14, 4))) model.add(Conv1D(64, kernel_size=3, strides=1, activation='relu', input_shape=(14, 32))) model.add(Conv1D(128, kernel_size=3, strides=1, activation='relu', input_shape=(14, 64))) model.add(Conv1D(256, kernel_size=3, strides=1, activation='relu', input_shape=(14, 128))) model.add(MaxPooling1D(pool_size=3, strides=1)) model.add(GRU(512,dropout=0.2, recurrent_dropout=0.2, return_sequences=True)) model.add(GRU(512,dropout=0.2, recurrent_dropout=0.2, return_sequences=True)) model.add(Flatten()) model.add(Dense(2, activation='softmax')) ''' #model.add(Embedding(14,4)) model.add(LSTM(256,return_sequences=True,input_shape=(14,4))) model.add(LSTM(256,return_sequences=True)) 
model.add(LSTM(256)) model.add(Dense(64, activation='relu', input_dim=14)) model.add(Dropout(0.2)) model.add(Dense(64, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(64,activation='relu')) model.add(Dropout(0.2)) model.add(Dense(2, activation='softmax')) adamx = keras.optimizers.Adamax(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0001) model.compile(loss='sparse_categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) model.fit(input, label, epochs=1000, batch_size=100) score, acc = model.evaluate(input2,y_test, batch_size=100) print('Test score:', score) print('Test accuracy:', acc) model.save("./model4.h5")
[ "ziyuan.guan@ufl.edu" ]
ziyuan.guan@ufl.edu
e6565fbb7fe151f4adbf8fb13de191e7f9e51300
fed0e0bed42f6297aa6b92c4af9be15edf363535
/models.py
194d40065f650d67baf95c691d68177479af81b8
[]
no_license
djwilder316/learningflask
7d99572f77a8b4af39c845d4cf6e70a6132e5d58
2cbe4a0b37d2f274353e4806dc3e917467a73d9e
refs/heads/master
2020-03-28T16:12:14.056789
2018-09-17T20:22:24
2018-09-17T20:22:24
148,669,516
0
0
null
null
null
null
UTF-8
Python
false
false
2,255
py
from flask_sqlalchemy import SQLAlchemy from werkzeug import generate_password_hash, check_password_hash import geocoder import urllib import json db = SQLAlchemy() class User(db.Model): __tablename__ = 'users' uid = db.Column(db.Integer, primary_key = True) firstname = db.Column(db.String(100)) lastname = db.Column(db.String(100)) email = db.Column(db.String(120), unique=True) pwdhash = db.Column(db.String(54)) def __init__(self, firstname, lastname, email, password): self.firstname = firstname.title() self.lastname = lastname.title() self.email = email.lower() self.set_password(password) def set_password(self, password): self.pwdhash = generate_password_hash(password) def check_password(self, password): return check_password_hash(self.pwdhash, password) # p = Place() # places = p.query("1600 Amphitheater Parkway Mountain View CA") class Place(object): def meters_to_walking_time(self, meters): # 80 meters is one minute walking time return int(meters / 80) def wiki_path(self, slug): return urllib.parse.urljoin("http://en.wikipedia.org/wiki/", slug.replace(' ', '_')) def address_to_latlng(self, address): g = geocoder.google(address) return (g.lat, g.lng) def query(self, address): lat, lng = self.address_to_latlng(address) query_url = 'https://en.wikipedia.org/w/api.php?action=query&list=geosearch&gsradius=5000&gscoord={0}%7C{1}&gslimit=20&format=json'.format(lat, lng) g = urllib.request.urlopen(query_url) results = g.read() g.close() data = json.loads(results) places = [] for place in data['query']['geosearch']: name = place['title'] meters = place['dist'] lat = place['lat'] lng = place['lon'] wiki_url = self.wiki_path(name) walking_time = self.meters_to_walking_time(meters) d = { 'name': name, 'url': wiki_url, 'time': walking_time, 'lat': lat, 'lng': lng } places.append(d) return places
[ "djwilder316@gmail.com" ]
djwilder316@gmail.com
d3c0c2506005e3610ea98dcccd01620e15a319f2
31ac0d992ebe3f8e40e31d48cb79a1708fa51b8f
/blogging/migrations/0003_auto_20200328_1255.py
3a322781d46d6500db0d4bb8226bf21c4773ac24
[]
no_license
franjaku/django-blog
78a94f8a56d7083fbf43a084561619530f9d933b
9da7dd0525519cfa378dcca834b62297f046cb46
refs/heads/master
2021-04-23T03:36:19.730623
2020-04-09T06:30:53
2020-04-09T06:30:53
249,895,075
0
0
null
null
null
null
UTF-8
Python
false
false
428
py
# Generated by Django 2.1.1 on 2020-03-28 19:55 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blogging', '0002_category'), ] operations = [ migrations.AlterField( model_name='category', name='posts', field=models.ManyToManyField(blank=True, related_name='categories', to='blogging.Post'), ), ]
[ "kulyckyf@umich.edu" ]
kulyckyf@umich.edu
0e6e154633b3d4ca832a232c20e7f1a4740450f6
e3c9877255b7591e3b12d12644fd13518a45b855
/account_rbi/controllers/controllers.py
0f31ed1992e8bac15980770a8ca0307391448360
[]
no_license
fadynoor/rbi
c445e7a53462c104b1c59ebc19291d0293a3bb43
ba956c42618cae7ad70f2ce22d10caa0847d7499
refs/heads/master
2022-12-26T17:18:00.448927
2020-09-25T10:40:25
2020-09-25T10:40:25
278,926,020
0
0
null
null
null
null
UTF-8
Python
false
false
762
py
# -*- coding: utf-8 -*- # from odoo import http # class AccountRbi(http.Controller): # @http.route('/account_rbi/account_rbi/', auth='public') # def index(self, **kw): # return "Hello, world" # @http.route('/account_rbi/account_rbi/objects/', auth='public') # def list(self, **kw): # return http.request.render('account_rbi.listing', { # 'root': '/account_rbi/account_rbi', # 'objects': http.request.env['account_rbi.account_rbi'].search([]), # }) # @http.route('/account_rbi/account_rbi/objects/<model("account_rbi.account_rbi"):obj>/', auth='public') # def object(self, obj, **kw): # return http.request.render('account_rbi.object', { # 'object': obj # })
[ "fady.noor@gmail.com" ]
fady.noor@gmail.com
d82de8f764febc64cf530f2dc46b710cd433c73d
e05f8d36c70336a8714cc260c02fe85ecee2e62e
/subject/tests/functional/v1/test_api.py
cfca2ac0204a25f545c1ca0124f77b4b5b32a902
[ "Apache-2.0" ]
permissive
laoyigrace/subject
eafa442b5d9ebf83c78a01ce3bb5d088d08d620d
e6ed989fdc250917a19788112b22322b73b3550f
refs/heads/master
2021-01-11T00:06:54.790751
2016-10-24T02:13:32
2016-10-24T02:13:32
70,754,470
0
0
null
null
null
null
UTF-8
Python
false
false
37,886
py
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functional test case that utilizes httplib2 against the API server""" import hashlib import httplib2 import sys from oslo_serialization import jsonutils from oslo_utils import units # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from subject.tests import functional from subject.tests.utils import minimal_headers from subject.tests.utils import skip_if_disabled FIVE_KB = 5 * units.Ki FIVE_GB = 5 * units.Gi class TestApi(functional.FunctionalTest): """Functional tests using httplib2 against the API server""" def _check_subject_create(self, headers, status=201, subject_data="*" * FIVE_KB): # performs subject_create request, checks the response and returns # content http = httplib2.Http() path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) response, content = http.request( path, 'POST', headers=headers, body=subject_data) self.assertEqual(status, response.status) return content def test_checksum_32_chars_at_subject_create(self): self.cleanup() self.start_servers(**self.__dict__.copy()) headers = minimal_headers('Subject1') subject_data = "*" * FIVE_KB # checksum can be no longer that 32 characters (String(32)) headers['X-Subject-Meta-Checksum'] = 'x' * 42 content = self._check_subject_create(headers, 400) self.assertIn("Invalid checksum", content) # test positive case as well headers['X-Subject-Meta-Checksum'] = 
hashlib.md5(subject_data).hexdigest() self._check_subject_create(headers) def test_param_int_too_large_at_create(self): # currently 2 params min_disk/min_ram can cause DBError on save self.cleanup() self.start_servers(**self.__dict__.copy()) # Integer field can't be greater than max 8-byte signed integer for param in ['min_disk', 'min_ram']: headers = minimal_headers('Subject1') # check that long numbers result in 400 headers['X-Subject-Meta-%s' % param] = str(sys.maxint + 1) content = self._check_subject_create(headers, 400) self.assertIn("'%s' value out of range" % param, content) # check that integers over 4 byte result in 400 headers['X-Subject-Meta-%s' % param] = str(2 ** 31) content = self._check_subject_create(headers, 400) self.assertIn("'%s' value out of range" % param, content) # verify positive case as well headers['X-Subject-Meta-%s' % param] = str((2 ** 31) - 1) self._check_subject_create(headers) @skip_if_disabled def test_get_head_simple_post(self): """ We test the following sequential series of actions: 0. GET /subjects - Verify no public subjects 1. GET /subjects/detail - Verify no public subjects 2. POST /subjects with public subject named Subject1 and no custom properties - Verify 201 returned 3. HEAD subject - Verify HTTP headers have correct information we just added 4. GET subject - Verify all information on subject we just added is correct 5. GET /subjects - Verify the subject we just added is returned 6. GET /subjects/detail - Verify the subject we just added is returned 7. PUT subject with custom properties of "distro" and "arch" - Verify 200 returned 8. PUT subject with too many custom properties - Verify 413 returned 9. GET subject - Verify updated information about subject was stored 10. PUT subject - Remove a previously existing property. 11. PUT subject - Add a previously deleted property. 12. PUT subject/members/member1 - Add member1 to subject 13. PUT subject/members/member2 - Add member2 to subject 14. 
GET subject/members - List subject members 15. DELETE subject/members/member1 - Delete subject member1 16. PUT subject/members - Attempt to replace members with an overlimit amount 17. PUT subject/members/member11 - Attempt to add a member while at limit 18. POST /subjects with another public subject named Subject2 - attribute and three custom properties, "distro", "arch" & "foo" - Verify a 200 OK is returned 19. HEAD subject2 - Verify subject2 found now 20. GET /subjects - Verify 2 public subjects 21. GET /subjects with filter on user-defined property "distro". - Verify both subjects are returned 22. GET /subjects with filter on user-defined property 'distro' but - with non-existent value. Verify no subjects are returned 23. GET /subjects with filter on non-existent user-defined property - "boo". Verify no subjects are returned 24. GET /subjects with filter 'arch=i386' - Verify only subject2 is returned 25. GET /subjects with filter 'arch=x86_64' - Verify only subject1 is returned 26. GET /subjects with filter 'foo=bar' - Verify only subject2 is returned 27. DELETE subject1 - Delete subject 28. GET subject/members - List deleted subject members 29. PUT subject/members/member2 - Update existing member2 of deleted subject 30. PUT subject/members/member3 - Add member3 to deleted subject 31. DELETE subject/members/member2 - Delete member2 from deleted subject 32. DELETE subject2 - Delete subject 33. GET /subjects - Verify no subjects are listed """ self.cleanup() self.start_servers(**self.__dict__.copy()) # 0. GET /subjects # Verify no public subjects path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual('{"subjects": []}', content) # 1. 
GET /subjects/detail # Verify no public subjects path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual('{"subjects": []}', content) # 2. POST /subjects with public subject named Subject1 # attribute and no custom properties. Verify a 200 OK is returned subject_data = "*" * FIVE_KB headers = minimal_headers('Subject1') path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=subject_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) subject_id = data['subject']['id'] self.assertEqual(hashlib.md5(subject_data).hexdigest(), data['subject']['checksum']) self.assertEqual(FIVE_KB, data['subject']['size']) self.assertEqual("Subject1", data['subject']['name']) self.assertTrue(data['subject']['is_public']) # 3. HEAD subject # Verify subject found now path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Subject1", response['x-subject-meta-name']) # 4. 
GET subject # Verify all information on subject we just added is correct path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_subject_headers = { 'x-subject-meta-id': subject_id, 'x-subject-meta-name': 'Subject1', 'x-subject-meta-is_public': 'True', 'x-subject-meta-status': 'active', 'x-subject-meta-disk_format': 'raw', 'x-subject-meta-container_format': 'ovf', 'x-subject-meta-size': str(FIVE_KB)} expected_std_headers = { 'content-length': str(FIVE_KB), 'content-type': 'application/octet-stream'} for expected_key, expected_value in expected_subject_headers.items(): self.assertEqual(expected_value, response[expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, response[expected_key])) for expected_key, expected_value in expected_std_headers.items(): self.assertEqual(expected_value, response[expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, response[expected_key])) self.assertEqual("*" * FIVE_KB, content) self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(), hashlib.md5(content).hexdigest()) # 5. GET /subjects # Verify one public subject path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_result = {"subjects": [ {"container_format": "ovf", "disk_format": "raw", "id": subject_id, "name": "Subject1", "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", "size": 5120}]} self.assertEqual(expected_result, jsonutils.loads(content)) # 6. 
GET /subjects/detail # Verify subject and all its metadata path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_subject = { "status": "active", "name": "Subject1", "deleted": False, "container_format": "ovf", "disk_format": "raw", "id": subject_id, "is_public": True, "deleted_at": None, "properties": {}, "size": 5120} subject = jsonutils.loads(content) for expected_key, expected_value in expected_subject.items(): self.assertEqual(expected_value, subject['subjects'][0][expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, subject['subjects'][0][expected_key])) # 7. PUT subject with custom properties of "distro" and "arch" # Verify 200 returned headers = {'X-Subject-Meta-Property-Distro': 'Ubuntu', 'X-Subject-Meta-Property-Arch': 'x86_64'} path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) data = jsonutils.loads(content) self.assertEqual("x86_64", data['subject']['properties']['arch']) self.assertEqual("Ubuntu", data['subject']['properties']['distro']) # 8. PUT subject with too many custom properties # Verify 413 returned headers = {} for i in range(11): # configured limit is 10 headers['X-Subject-Meta-Property-foo%d' % i] = 'bar' path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(413, response.status) # 9. 
GET /subjects/detail # Verify subject and all its metadata path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_subject = { "status": "active", "name": "Subject1", "deleted": False, "container_format": "ovf", "disk_format": "raw", "id": subject_id, "is_public": True, "deleted_at": None, "properties": {'distro': 'Ubuntu', 'arch': 'x86_64'}, "size": 5120} subject = jsonutils.loads(content) for expected_key, expected_value in expected_subject.items(): self.assertEqual(expected_value, subject['subjects'][0][expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, subject['subjects'][0][expected_key])) # 10. PUT subject and remove a previously existing property. headers = {'X-Subject-Meta-Property-Arch': 'x86_64'} path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port) response, content = http.request(path, 'GET') self.assertEqual(200, response.status) data = jsonutils.loads(content)['subjects'][0] self.assertEqual(1, len(data['properties'])) self.assertEqual("x86_64", data['properties']['arch']) # 11. PUT subject and add a previously deleted property. 
headers = {'X-Subject-Meta-Property-Distro': 'Ubuntu', 'X-Subject-Meta-Property-Arch': 'x86_64'} path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) data = jsonutils.loads(content) path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port) response, content = http.request(path, 'GET') self.assertEqual(200, response.status) data = jsonutils.loads(content)['subjects'][0] self.assertEqual(2, len(data['properties'])) self.assertEqual("x86_64", data['properties']['arch']) self.assertEqual("Ubuntu", data['properties']['distro']) self.assertNotEqual(data['created_at'], data['updated_at']) # 12. Add member to subject path = ("http://%s:%d/v1/subjects/%s/members/pattieblack" % ("127.0.0.1", self.api_port, subject_id)) http = httplib2.Http() response, content = http.request(path, 'PUT') self.assertEqual(204, response.status) # 13. Add member to subject path = ("http://%s:%d/v1/subjects/%s/members/pattiewhite" % ("127.0.0.1", self.api_port, subject_id)) http = httplib2.Http() response, content = http.request(path, 'PUT') self.assertEqual(204, response.status) # 14. List subject members path = ("http://%s:%d/v1/subjects/%s/members" % ("127.0.0.1", self.api_port, subject_id)) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) data = jsonutils.loads(content) self.assertEqual(2, len(data['members'])) self.assertEqual('pattieblack', data['members'][0]['member_id']) self.assertEqual('pattiewhite', data['members'][1]['member_id']) # 15. Delete subject member path = ("http://%s:%d/v1/subjects/%s/members/pattieblack" % ("127.0.0.1", self.api_port, subject_id)) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(204, response.status) # 16. 
Attempt to replace members with an overlimit amount # Adding 11 subject members should fail since configured limit is 10 path = ("http://%s:%d/v1/subjects/%s/members" % ("127.0.0.1", self.api_port, subject_id)) memberships = [] for i in range(11): member_id = "foo%d" % i memberships.append(dict(member_id=member_id)) http = httplib2.Http() body = jsonutils.dumps(dict(memberships=memberships)) response, content = http.request(path, 'PUT', body=body) self.assertEqual(413, response.status) # 17. Attempt to add a member while at limit # Adding an 11th member should fail since configured limit is 10 path = ("http://%s:%d/v1/subjects/%s/members" % ("127.0.0.1", self.api_port, subject_id)) memberships = [] for i in range(10): member_id = "foo%d" % i memberships.append(dict(member_id=member_id)) http = httplib2.Http() body = jsonutils.dumps(dict(memberships=memberships)) response, content = http.request(path, 'PUT', body=body) self.assertEqual(204, response.status) path = ("http://%s:%d/v1/subjects/%s/members/fail_me" % ("127.0.0.1", self.api_port, subject_id)) http = httplib2.Http() response, content = http.request(path, 'PUT') self.assertEqual(413, response.status) # 18. POST /subjects with another public subject named Subject2 # attribute and three custom properties, "distro", "arch" & "foo". 
# Verify a 200 OK is returned subject_data = "*" * FIVE_KB headers = minimal_headers('Subject2') headers['X-Subject-Meta-Property-Distro'] = 'Ubuntu' headers['X-Subject-Meta-Property-Arch'] = 'i386' headers['X-Subject-Meta-Property-foo'] = 'bar' path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=subject_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) subject2_id = data['subject']['id'] self.assertEqual(hashlib.md5(subject_data).hexdigest(), data['subject']['checksum']) self.assertEqual(FIVE_KB, data['subject']['size']) self.assertEqual("Subject2", data['subject']['name']) self.assertTrue(data['subject']['is_public']) self.assertEqual('Ubuntu', data['subject']['properties']['distro']) self.assertEqual('i386', data['subject']['properties']['arch']) self.assertEqual('bar', data['subject']['properties']['foo']) # 19. HEAD subject2 # Verify subject2 found now path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject2_id) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Subject2", response['x-subject-meta-name']) # 20. GET /subjects # Verify 2 public subjects path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) subjects = jsonutils.loads(content)['subjects'] self.assertEqual(2, len(subjects)) self.assertEqual(subject2_id, subjects[0]['id']) self.assertEqual(subject_id, subjects[1]['id']) # 21. GET /subjects with filter on user-defined property 'distro'. 
# Verify both subjects are returned path = "http://%s:%d/v1/subjects?property-distro=Ubuntu" % ( "127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) subjects = jsonutils.loads(content)['subjects'] self.assertEqual(2, len(subjects)) self.assertEqual(subject2_id, subjects[0]['id']) self.assertEqual(subject_id, subjects[1]['id']) # 22. GET /subjects with filter on user-defined property 'distro' but # with non-existent value. Verify no subjects are returned path = "http://%s:%d/v1/subjects?property-distro=fedora" % ( "127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) subjects = jsonutils.loads(content)['subjects'] self.assertEqual(0, len(subjects)) # 23. GET /subjects with filter on non-existent user-defined property # 'boo'. Verify no subjects are returned path = "http://%s:%d/v1/subjects?property-boo=bar" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) subjects = jsonutils.loads(content)['subjects'] self.assertEqual(0, len(subjects)) # 24. GET /subjects with filter 'arch=i386' # Verify only subject2 is returned path = "http://%s:%d/v1/subjects?property-arch=i386" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) subjects = jsonutils.loads(content)['subjects'] self.assertEqual(1, len(subjects)) self.assertEqual(subject2_id, subjects[0]['id']) # 25. 
GET /subjects with filter 'arch=x86_64' # Verify only subject1 is returned path = "http://%s:%d/v1/subjects?property-arch=x86_64" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) subjects = jsonutils.loads(content)['subjects'] self.assertEqual(1, len(subjects)) self.assertEqual(subject_id, subjects[0]['id']) # 26. GET /subjects with filter 'foo=bar' # Verify only subject2 is returned path = "http://%s:%d/v1/subjects?property-foo=bar" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) subjects = jsonutils.loads(content)['subjects'] self.assertEqual(1, len(subjects)) self.assertEqual(subject2_id, subjects[0]['id']) # 27. DELETE subject1 path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) # 28. Try to list members of deleted subject path = ("http://%s:%d/v1/subjects/%s/members" % ("127.0.0.1", self.api_port, subject_id)) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(404, response.status) # 29. Try to update member of deleted subject path = ("http://%s:%d/v1/subjects/%s/members" % ("127.0.0.1", self.api_port, subject_id)) http = httplib2.Http() fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] body = jsonutils.dumps(dict(memberships=fixture)) response, content = http.request(path, 'PUT', body=body) self.assertEqual(404, response.status) # 30. Try to add member to deleted subject path = ("http://%s:%d/v1/subjects/%s/members/chickenpattie" % ("127.0.0.1", self.api_port, subject_id)) http = httplib2.Http() response, content = http.request(path, 'PUT') self.assertEqual(404, response.status) # 31. 
Try to delete member of deleted subject path = ("http://%s:%d/v1/subjects/%s/members/pattieblack" % ("127.0.0.1", self.api_port, subject_id)) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(404, response.status) # 32. DELETE subject2 path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject2_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) # 33. GET /subjects # Verify no subjects are listed path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) subjects = jsonutils.loads(content)['subjects'] self.assertEqual(0, len(subjects)) # 34. HEAD /subjects/detail path = "http://%s:%d/v1/subjects/detail" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(405, response.status) self.assertEqual('GET', response.get('allow')) self.stop_servers() def test_download_non_exists_subject_raises_http_forbidden(self): """ We test the following sequential series of actions:: 0. POST /subjects with public subject named Subject1 and no custom properties - Verify 201 returned 1. HEAD subject - Verify HTTP headers have correct information we just added 2. GET subject - Verify all information on subject we just added is correct 3. DELETE subject1 - Delete the newly added subject 4. 
GET subject - Verify that 403 HTTPForbidden exception is raised prior to 404 HTTPNotFound """ self.cleanup() self.start_servers(**self.__dict__.copy()) subject_data = "*" * FIVE_KB headers = minimal_headers('Subject1') path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=subject_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) subject_id = data['subject']['id'] self.assertEqual(hashlib.md5(subject_data).hexdigest(), data['subject']['checksum']) self.assertEqual(FIVE_KB, data['subject']['size']) self.assertEqual("Subject1", data['subject']['name']) self.assertTrue(data['subject']['is_public']) # 1. HEAD subject # Verify subject found now path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Subject1", response['x-subject-meta-name']) # 2. GET /subjects # Verify one public subject path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_result = {"subjects": [ {"container_format": "ovf", "disk_format": "raw", "id": subject_id, "name": "Subject1", "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", "size": 5120}]} self.assertEqual(expected_result, jsonutils.loads(content)) # 3. DELETE subject1 path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) # 4. 
GET subject # Verify that 403 HTTPForbidden exception is raised prior to # 404 HTTPNotFound rules = {"download_subject": '!'} self.set_policy_rules(rules) path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(403, response.status) self.stop_servers() def test_download_non_exists_subject_raises_http_not_found(self): """ We test the following sequential series of actions: 0. POST /subjects with public subject named Subject1 and no custom properties - Verify 201 returned 1. HEAD subject - Verify HTTP headers have correct information we just added 2. GET subject - Verify all information on subject we just added is correct 3. DELETE subject1 - Delete the newly added subject 4. GET subject - Verify that 404 HTTPNotFound exception is raised """ self.cleanup() self.start_servers(**self.__dict__.copy()) subject_data = "*" * FIVE_KB headers = minimal_headers('Subject1') path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=subject_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) subject_id = data['subject']['id'] self.assertEqual(hashlib.md5(subject_data).hexdigest(), data['subject']['checksum']) self.assertEqual(FIVE_KB, data['subject']['size']) self.assertEqual("Subject1", data['subject']['name']) self.assertTrue(data['subject']['is_public']) # 1. HEAD subject # Verify subject found now path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Subject1", response['x-subject-meta-name']) # 2. 
GET /subjects # Verify one public subject path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_result = {"subjects": [ {"container_format": "ovf", "disk_format": "raw", "id": subject_id, "name": "Subject1", "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", "size": 5120}]} self.assertEqual(expected_result, jsonutils.loads(content)) # 3. DELETE subject1 path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) # 4. GET subject # Verify that 404 HTTPNotFound exception is raised path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject_id) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(404, response.status) self.stop_servers() def test_status_cannot_be_manipulated_directly(self): self.cleanup() self.start_servers(**self.__dict__.copy()) headers = minimal_headers('Subject1') # Create a 'queued' subject http = httplib2.Http() headers = {'Content-Type': 'application/octet-stream', 'X-Subject-Meta-Disk-Format': 'raw', 'X-Subject-Meta-Container-Format': 'bare'} path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) response, content = http.request(path, 'POST', headers=headers, body=None) self.assertEqual(201, response.status) subject = jsonutils.loads(content)['subject'] self.assertEqual('queued', subject['status']) # Ensure status of 'queued' subject can't be changed path = "http://%s:%d/v1/subjects/%s" % ("127.0.0.1", self.api_port, subject['id']) http = httplib2.Http() headers = {'X-Subject-Meta-Status': 'active'} response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(403, response.status) response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('queued', 
response['x-subject-meta-status']) # We allow 'setting' to the same status http = httplib2.Http() headers = {'X-Subject-Meta-Status': 'queued'} response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('queued', response['x-subject-meta-status']) # Make subject active http = httplib2.Http() headers = {'Content-Type': 'application/octet-stream'} response, content = http.request(path, 'PUT', headers=headers, body='data') self.assertEqual(200, response.status) subject = jsonutils.loads(content)['subject'] self.assertEqual('active', subject['status']) # Ensure status of 'active' subject can't be changed http = httplib2.Http() headers = {'X-Subject-Meta-Status': 'queued'} response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(403, response.status) response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('active', response['x-subject-meta-status']) # We allow 'setting' to the same status http = httplib2.Http() headers = {'X-Subject-Meta-Status': 'active'} response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('active', response['x-subject-meta-status']) # Create a 'queued' subject, ensure 'status' header is ignored http = httplib2.Http() path = "http://%s:%d/v1/subjects" % ("127.0.0.1", self.api_port) headers = {'Content-Type': 'application/octet-stream', 'X-Subject-Meta-Status': 'active'} response, content = http.request(path, 'POST', headers=headers, body=None) self.assertEqual(201, response.status) subject = jsonutils.loads(content)['subject'] self.assertEqual('queued', subject['status']) # Create an 'active' subject, ensure 'status' header is ignored http = httplib2.Http() path = "http://%s:%d/v1/subjects" 
% ("127.0.0.1", self.api_port) headers = {'Content-Type': 'application/octet-stream', 'X-Subject-Meta-Disk-Format': 'raw', 'X-Subject-Meta-Status': 'queued', 'X-Subject-Meta-Container-Format': 'bare'} response, content = http.request(path, 'POST', headers=headers, body='data') self.assertEqual(201, response.status) subject = jsonutils.loads(content)['subject'] self.assertEqual('active', subject['status']) self.stop_servers()
[ "yibo_grace@163.com" ]
yibo_grace@163.com
ff00a04615743bcd931d99ee7f9a21cade5d3410
3ea3f46bd4d7231c5eb5c1e1c02625f5290cac76
/heart/migrations/0003_auto_20170317_1846.py
0573637431da4546fd61ccdbdfb05f3edf19ea1b
[]
no_license
moonclearner/simpleDjangoProject
0340b0a744651bcc9dbd7a52b12c4827d40a7a5f
51fc70d4c499aa64e82a6f02c913f44c45cad323
refs/heads/master
2021-01-23T01:41:25.481027
2017-04-11T14:29:09
2017-04-11T14:29:09
85,927,020
0
0
null
null
null
null
UTF-8
Python
false
false
1,117
py
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-03-17 10:46 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('heart', '0002_auto_20170317_1841'), ] operations = [ migrations.AlterField( model_name='hbeat', name='Created_at', field=models.DateTimeField(auto_now_add=True), ), migrations.AlterField( model_name='hpluse', name='Created_at', field=models.DateTimeField(auto_now_add=True), ), migrations.AlterField( model_name='hpres', name='Created_at', field=models.DateTimeField(auto_now_add=True), ), migrations.AlterField( model_name='hrelax', name='Created_at', field=models.DateTimeField(auto_now_add=True), ), migrations.AlterField( model_name='htem', name='Created_at', field=models.DateTimeField(auto_now_add=True), ), ]
[ "718857460@qq.com" ]
718857460@qq.com
40895ed0380296e8484501ac5322c0acb1c93e90
692b7a9f0902ce86c4a32582b309d20d12df1631
/tcp_tests/fixtures/rally_fixtures.py
45e828a31aa7814ed0a0d1c64ce4827b7d137660
[]
no_license
dis-xcom/tcpcloud-devops-env
10bee9ab3dc722a5fd4ed116bbe7d09439e3a6e4
9d3b4530458513157b3662089288f7d85c656226
refs/heads/master
2021-01-11T04:29:27.164184
2016-10-18T10:39:00
2016-10-18T10:39:00
71,150,983
0
0
null
null
null
null
UTF-8
Python
false
false
1,029
py
# Copyright 2016 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pytest from tcp_tests.managers import rallymanager @pytest.fixture(scope='function') def rally(underlay): """Fixture that provides various actions for TCP :param config: fixture provides oslo.config :param underlay: fixture provides underlay manager :rtype: RallyManager For use in tests or fixtures to deploy a custom TCP """ return rallymanager.RallyManager(underlay, 'cfg01')
[ "dis@srv09-kha.kha.mirantis.net" ]
dis@srv09-kha.kha.mirantis.net
6dbabae65ed075a12913a12e8af2019751371a24
fdd2ed32e45ca3dcc978cf7e5af76d2afd8cb9f9
/87.py
ecdbdd1ab3aee85a6e5da158c4511eb5ef0c7440
[]
no_license
Narendon123/python
e5295e5b71867fd6a90d080c01e2db6930659f95
cf0b3dd4ff4eb4d6d44f061b45d00baa25de5a38
refs/heads/master
2020-05-31T06:06:19.230781
2019-07-11T12:51:25
2019-07-11T12:51:25
190,134,515
0
1
null
null
null
null
UTF-8
Python
false
false
138
py
w=input() w=w.split() a=int(w[0]) b=int(w[1]) i=1 while(i<=a and i<=b): if(a%i==0 and b%i==0): gcd=i i=i+1 print(gcd)
[ "noreply@github.com" ]
noreply@github.com
286e6fc6fadb3b6f3acf790a8ff9b86949ce9f42
c9b1e04ba65ba3e0af2a8ae86b88187b72bcaa0b
/.svn/pristine/28/286e6fc6fadb3b6f3acf790a8ff9b86949ce9f42.svn-base
906aef8455f144197811b24c002a226a851f74cc
[]
no_license
feitianyiren/TaskCoach
7762a89d5b521cfba0827323a9e8a91d1579810b
0b7427562074845ac771e59e24a750aa5b432589
refs/heads/master
2020-04-08T04:56:35.491490
2016-01-12T13:29:03
2016-01-12T13:29:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
22,438
import patterns, time, copy import domain.date as date class TaskProperty(property): pass class Task(patterns.ObservableComposite): def __init__(self, subject='', description='', dueDate=None, startDate=None, completionDate=None, budget=None, priority=0, id_=None, lastModificationTime=None, hourlyFee=0, fixedFee=0, reminder=None, attachments=None, categories=None, efforts=None, shouldMarkCompletedWhenAllChildrenCompleted=None, *args, **kwargs): super(Task, self).__init__(*args, **kwargs) self._subject = subject self._description = description self._dueDate = dueDate or date.Date() self._startDate = startDate or date.Today() self._completionDate = completionDate or date.Date() self._budget = budget or date.TimeDelta() self._id = id_ or '%s:%s'%(id(self), time.time()) # FIXME: Not a valid XML id self._efforts = efforts or [] for effort in self._efforts: effort.setTask(self) self._categories = set(categories or []) self._priority = priority self._hourlyFee = hourlyFee self._fixedFee = fixedFee self._reminder = reminder self._attachments = attachments or [] self._shouldMarkCompletedWhenAllChildrenCompleted = \ shouldMarkCompletedWhenAllChildrenCompleted self.setLastModificationTime(lastModificationTime) def __setstate__(self, state): self.setSubject(state['subject']) self.setDescription(state['description']) self.setId(state['id']) self.setStartDate(state['startDate']) self.setDueDate(state['dueDate']) self.setCompletionDate(state['completionDate']) self.replaceChildren(state['children']) self.replaceParent(state['parent']) self.setEfforts(state['efforts']) self.setBudget(state['budget']) self.setCategories(state['categories']) self.setPriority(state['priority']) self.setAttachments(state['attachments']) self.setHourlyFee(state['hourlyFee']) self.setFixedFee(state['fixedFee']) self.shouldMarkCompletedWhenAllChildrenCompleted = \ state['shouldMarkCompletedWhenAllChildrenCompleted'] def __getstate__(self): return dict(subject=self._subject, description=self._description, 
id=self._id, dueDate=self._dueDate, startDate=self._startDate, completionDate=self._completionDate, children=self.children(), parent=self.parent(), efforts=self._efforts, budget=self._budget, categories=set(self._categories), priority=self._priority, attachments=self._attachments[:], hourlyFee=self._hourlyFee, fixedFee=self._fixedFee, shouldMarkCompletedWhenAllChildrenCompleted=\ self._shouldMarkCompletedWhenAllChildrenCompleted) def __repr__(self): return self._subject def id(self): return self._id def setId(self, id): self._id = id def __notifyObservers(self, event): patterns.Publisher().notifyObservers(event) # I want to use properties more, but I still need to make all the changes. # So, only description is a property right now. def __getDescription(self): return self.__description def __setDescription(self, description): self.__description = description _description = TaskProperty(__getDescription, __setDescription) def description(self): return self._description def setDescription(self, description): if description != self._description: self._description = description self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.description', description)) def allChildrenCompleted(self): if not self.children(): return False for child in self.children(): if not child.completed(): return False return True def copy(self): ''' Copy constructor ''' return self.__class__(self.subject(), self.description(), self.dueDate(), self.startDate(), self.completionDate(), parent=self.parent(), budget=self.budget(), priority=self.priority(), categories=set(self.categories()), fixedFee=self.fixedFee(), hourlyFee=self.hourlyFee(), attachments=self.attachments()[:], reminder=self.reminder(), shouldMarkCompletedWhenAllChildrenCompleted=\ self.shouldMarkCompletedWhenAllChildrenCompleted, children=[child.copy() for child in self.children()]) def newChild(self, subject='New subtask'): ''' Subtask constructor ''' return super(Task, self).newChild(subject=subject, 
dueDate=self.dueDate(), startDate=max(date.Today(), self.startDate()), parent=self) def addChild(self, child): if child in self.children(): return oldTotalBudgetLeft = self.budgetLeft(recursive=True) oldTotalPriority = self.priority(recursive=True) super(Task, self).addChild(child) self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, Task.addChildEventType(), child)) newTotalBudgetLeft = self.budgetLeft(recursive=True) if child.budget(recursive=True): self.notifyObserversOfTotalBudgetChange() if newTotalBudgetLeft != oldTotalBudgetLeft: self.notifyObserversOfTotalBudgetLeftChange() if child.timeSpent(recursive=True): self.notifyObserversOfTotalTimeSpentChange() if child.priority(recursive=True) > oldTotalPriority: self.notifyObserversOfTotalPriorityChange() if child.revenue(recursive=True): self.notifyObserversOfTotalRevenueChange() if child.isBeingTracked(recursive=True): self.notifyObserversOfStartTracking(*child.activeEfforts(recursive=True)) def removeChild(self, child): if child not in self.children(): return oldTotalBudgetLeft = self.budgetLeft(recursive=True) oldTotalPriority = self.priority(recursive=True) super(Task, self).removeChild(child) self.setLastModificationTime() newTotalBudgetLeft = self.budgetLeft(recursive=True) if child.budget(recursive=True): self.notifyObserversOfTotalBudgetChange() if newTotalBudgetLeft != oldTotalBudgetLeft: self.notifyObserversOfTotalBudgetLeftChange() if child.timeSpent(recursive=True): self.notifyObserversOfTotalTimeSpentChange() if child.priority(recursive=True) == oldTotalPriority: self.notifyObserversOfTotalPriorityChange() if child.revenue(recursive=True): self.notifyObserversOfTotalRevenueChange() if child.isBeingTracked(recursive=True) and not \ self.isBeingTracked(recursive=True): self.notifyObserversOfStopTracking(*child.activeEfforts(recursive=True)) def subject(self, recursive=False): ''' The recursive flag is allowed, but ignored. 
This makes task.sorter.Sorter.__createRegularSortKey easier. ''' return self._subject def setSubject(self, subject): if subject != self._subject: self._subject = subject self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.subject', subject)) def dueDate(self, recursive=False): if recursive: childrenDueDates = [child.dueDate(recursive=True) for child in self.children() if not child.completed()] return min(childrenDueDates + [self._dueDate]) else: return self._dueDate def setDueDate(self, dueDate): if dueDate != self._dueDate: self._dueDate = dueDate self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.dueDate', dueDate)) def startDate(self, recursive=False): if recursive: childrenStartDates = [child.startDate(recursive=True) for child in self.children() if not child.completed()] return min(childrenStartDates+[self._startDate]) else: return self._startDate def setStartDate(self, startDate): if startDate != self._startDate: self._startDate = startDate self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.startDate', startDate)) def timeLeft(self, recursive=False): return self.dueDate(recursive) - date.Today() def completionDate(self, recursive=False): if recursive: childrenCompletionDates = [child.completionDate(recursive=True) \ for child in self.children() if child.completed()] return max(childrenCompletionDates+[self._completionDate]) else: return self._completionDate def setCompletionDate(self, completionDate=None): completionDate = completionDate or date.Today() if completionDate != self._completionDate: self._completionDate = completionDate self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.completionDate', completionDate)) if completionDate != date.Date(): self.setReminder(None) def completed(self): return self.completionDate() != date.Date() def overdue(self): return self.dueDate() < date.Today() and not self.completed() def inactive(self): return 
(self.startDate() > date.Today()) and not self.completed() def active(self): return not self.inactive() and not self.completed() def dueToday(self): return (self.dueDate() == date.Today() and not self.completed()) def dueTomorrow(self): return (self.dueDate() == date.Tomorrow() and not self.completed()) # effort related methods: def efforts(self, recursive=False): childEfforts = [] if recursive: for child in self.children(): childEfforts.extend(child.efforts(recursive=True)) return self._efforts + childEfforts def activeEfforts(self, recursive=False): return [effort for effort in self.efforts(recursive) \ if effort.isBeingTracked()] def nrActiveEfforts(self): return len(self.activeEfforts()) def isBeingTracked(self, recursive=False): return self.activeEfforts(recursive) def addEffort(self, effort): wasTracking = self.isBeingTracked() if effort not in self._efforts: self._efforts.append(effort) self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.effort.add', effort)) if effort.isBeingTracked() and not wasTracking: self.notifyObserversOfStartTracking(effort) self.notifyObserversOfTimeSpentChange() def removeEffort(self, effort): if effort in self._efforts: self._efforts.remove(effort) self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.effort.remove', effort)) if effort.isBeingTracked() and not self.isBeingTracked(): self.notifyObserversOfStopTracking(effort) self.notifyObserversOfTimeSpentChange() def setEfforts(self, efforts): self._efforts = efforts # FIXME: no notification? 
def timeSpent(self, recursive=False): if recursive: return self._myTimeSpent() + self._childrenTimeSpent() else: return self._myTimeSpent() def stopTracking(self): stoppedEfforts = [] for effort in self.activeEfforts(): effort.setStop() stoppedEfforts.append(effort) if stoppedEfforts: self.setLastModificationTime() return stoppedEfforts def budget(self, recursive=False): result = self._budget if recursive: for task in self.children(): result += task.budget(recursive) return result def setBudget(self, budget): if budget != self._budget: self._budget = budget self.setLastModificationTime() self.notifyObserversOfBudgetChange() self.notifyObserversOfBudgetLeftChange() def budgetLeft(self, recursive=False): budget = self.budget(recursive) if budget: return budget - self.timeSpent(recursive) else: return budget def _myTimeSpent(self): return sum([effort.duration() for effort in self.efforts()], date.TimeDelta()) def _childrenTimeSpent(self): return sum([child.timeSpent(recursive=True) \ for child in self.children()], date.TimeDelta()) def notifyObserversOfBudgetChange(self): self.__notifyObservers(patterns.Event(self, 'task.budget', self.budget())) self.notifyObserversOfTotalBudgetChange() def notifyObserversOfTotalBudgetChange(self): self.__notifyObservers(patterns.Event(self, 'task.totalBudget', self.budget(recursive=True))) parent = self.parent() if parent: parent.notifyObserversOfTotalBudgetChange() def notifyObserversOfBudgetLeftChange(self): self.__notifyObservers(patterns.Event(self, 'task.budgetLeft', self.budgetLeft())) self.notifyObserversOfTotalBudgetLeftChange() def notifyObserversOfTotalBudgetLeftChange(self): self.__notifyObservers(patterns.Event(self, 'task.totalBudgetLeft', self.budgetLeft(recursive=True))) parent = self.parent() if parent: parent.notifyObserversOfTotalBudgetLeftChange() def notifyObserversOfTimeSpentChange(self): self.__notifyObservers(patterns.Event(self, 'task.timeSpent', self.timeSpent())) self.notifyObserversOfTotalTimeSpentChange() 
if self.budget(): self.notifyObserversOfBudgetLeftChange() elif self.budget(recursive=True): self.notifyObserversOfTotalBudgetLeftChange() if self.hourlyFee() > 0: self.notifyObserversOfRevenueChange() def notifyObserversOfTotalTimeSpentChange(self): self.__notifyObservers(patterns.Event(self, 'task.totalTimeSpent', self.timeSpent(recursive=True))) parent = self.parent() if parent: parent.notifyObserversOfTotalTimeSpentChange() def notifyObserversOfStartTracking(self, *trackedEfforts): self.__notifyObservers(patterns.Event(self, 'task.track.start', *trackedEfforts)) parent = self.parent() if parent: parent.notifyObserversOfStartTracking(*trackedEfforts) def notifyObserversOfStopTracking(self, *trackedEfforts): self.__notifyObservers(patterns.Event(self, 'task.track.stop', *trackedEfforts)) parent = self.parent() if parent: parent.notifyObserversOfStopTracking(*trackedEfforts) # categories def categories(self, recursive=False): result = set(self._categories) if recursive and self.parent() is not None: result |= self.parent().categories(recursive=True) return result def addCategory(self, category): if category not in self._categories: self._categories.add(category) category.addTask(self) self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.category.add', category)) def removeCategory(self, category): if category in self._categories: self._categories.discard(category) category.removeTask(self) self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.category.remove', category)) def setCategories(self, categories): self._categories = categories # FIXME: no notification? 
# priority def priority(self, recursive=False): if recursive: childPriorities = [child.priority(recursive=True) \ for child in self.children()] return max(childPriorities + [self._priority]) else: return self._priority def setPriority(self, priority): if priority != self._priority: self._priority = priority self.setLastModificationTime() self.notifyObserversOfPriorityChange() def notifyObserversOfPriorityChange(self): self.__notifyObservers(patterns.Event(self, 'task.priority', self.priority())) self.notifyObserversOfTotalPriorityChange() def notifyObserversOfTotalPriorityChange(self): myTotalPriority = self.priority(recursive=True) self.__notifyObservers(patterns.Event(self, 'task.totalPriority', myTotalPriority)) parent = self.parent() if parent and myTotalPriority == parent.priority(recursive=True): parent.notifyObserversOfTotalPriorityChange() # modifications def lastModificationTime(self, recursive=False): if recursive: childModificationTimes = [child.lastModificationTime(recursive=True) for child in self.children()] return max(childModificationTimes + [self._lastModificationTime]) else: return self._lastModificationTime def setLastModificationTime(self, time=None): self._lastModificationTime = time or date.DateTime.now() # revenue def hourlyFee(self, recursive=False): return self._hourlyFee def setHourlyFee(self, hourlyFee): if hourlyFee != self._hourlyFee: self._hourlyFee = hourlyFee self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.hourlyFee', hourlyFee)) if self.timeSpent() > date.TimeDelta(): self.notifyObserversOfRevenueChange() def revenue(self, recursive=False): if recursive: childRevenues = sum(child.revenue(recursive) for child in self.children()) else: childRevenues = 0 return self.timeSpent().hours() * self.hourlyFee() + self.fixedFee() + childRevenues def fixedFee(self, recursive=False): if recursive: childFixedFees = sum(child.fixedFee(recursive) for child in self.children()) else: childFixedFees = 0 return 
self._fixedFee + childFixedFees def setFixedFee(self, fixedFee): if fixedFee != self._fixedFee: self._fixedFee = fixedFee self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.fixedFee', fixedFee)) self.notifyObserversOfRevenueChange() def notifyObserversOfRevenueChange(self): self.__notifyObservers(patterns.Event(self, 'task.revenue', self.revenue())) self.notifyObserversOfTotalRevenueChange() def notifyObserversOfTotalRevenueChange(self): self.__notifyObservers(patterns.Event(self, 'task.totalRevenue', self.revenue(recursive=True))) parent = self.parent() if parent: parent.notifyObserversOfTotalRevenueChange() # reminder def reminder(self): return self._reminder def setReminder(self, reminderDateTime=None): if reminderDateTime == date.DateTime.max: reminderDateTime = None if reminderDateTime != self._reminder: self._reminder = reminderDateTime self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.reminder', self._reminder)) # attachments def attachments(self): return self._attachments def addAttachments(self, *attachments): if attachments: self._attachments.extend(attachments) self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.attachment.add', *attachments)) def removeAttachments(self, *attachments): attachmentsRemoved = [] for attachment in attachments: if attachment in self._attachments: self._attachments.remove(attachment) attachmentsRemoved.append(attachment) if attachmentsRemoved: self.setLastModificationTime() self.__notifyObservers(patterns.Event(self, 'task.attachment.remove', *attachmentsRemoved)) def removeAllAttachments(self): self.removeAttachments(*self._attachments) def setAttachments(self, attachments): self._attachments = attachments # FIXME: no notification? # behavior # To experiment, this attribute is coded by means of a proporty, which # means you can set it like this: task.shouldMark... 
= True def __setShouldMarkCompletedWhenAllChildrenCompleted(self, newValue): if newValue == self._shouldMarkCompletedWhenAllChildrenCompleted: return self._shouldMarkCompletedWhenAllChildrenCompleted = newValue self.__notifyObservers(patterns.Event(self, 'task.setting.shouldMarkCompletedWhenAllChildrenCompleted', newValue)) def __getShouldMarkCompletedWhenAllChildrenCompleted(self): return self._shouldMarkCompletedWhenAllChildrenCompleted shouldMarkCompletedWhenAllChildrenCompleted = \ property(fget=__getShouldMarkCompletedWhenAllChildrenCompleted, fset=__setShouldMarkCompletedWhenAllChildrenCompleted)
[ "hieronymus_schweiz@yahoo.de" ]
hieronymus_schweiz@yahoo.de
713482388b498523e139b006bdf1fc3b83d5e35b
74a188c3470cba0d69c437fe1c1b93e63d8327b8
/part1/lesson25_part1_demo/vacancies/models.py
802551eb9eb98c818666f208309d34bf0b98c3c2
[]
no_license
skypro-008/lesson25
81aa8155409ed600eab1aea0c454632a2822f051
2d2c91879cde763743a22477e813218e00cb5a76
refs/heads/master
2023-08-28T15:46:25.400658
2021-09-20T15:26:45
2021-09-20T15:26:45
407,803,797
0
1
null
null
null
null
UTF-8
Python
false
false
104
py
from django.db import models class Vacancy(models.Model): text = models.CharField(max_length=1000)
[ "aalmukhametova@gmail.com" ]
aalmukhametova@gmail.com
8a33374b9c01ded55865a5c9464ca843e32074d6
37220d7b60d682eb1abf40326d061485581aab36
/ajax/urls.py
b06af246996089bc8452ee5a25eabcdc705623a1
[ "BSD-3-Clause" ]
permissive
lautarianoo/LautAvito
547fba9a0bb3a65aac6132e00382a8876bca4a28
106dcb6f04230af2540bd3883c85713828cd051c
refs/heads/master
2023-06-24T11:08:17.889875
2021-07-26T17:30:08
2021-07-26T17:30:08
377,897,865
3
0
null
null
null
null
UTF-8
Python
false
false
137
py
from django.urls import path from . import views urlpatterns = [ path('get_districts', views.get_districts, name='get_districts') ]
[ "neonchick1" ]
neonchick1
9a3303167a28af09eda1b153ab50bdbab82cba7b
c91ef1310c905771477f11d5694b4991926152c5
/mysite/blog/tests/test_views.py
224c3168e12e378d809035801e136169f1eb3e36
[]
no_license
Yuichi-kawasaki/python-blog
248388ca142294eb937718801b8a02825f614c05
3f91618c0ebdb29cbfaab32d56fd727b39040879
refs/heads/master
2023-05-12T16:18:57.617116
2021-05-30T07:54:36
2021-05-30T07:54:36
372,155,603
0
0
null
null
null
null
UTF-8
Python
false
false
5,674
py
from django.test import TestCase from django.urls import reverse from ..models import Post class PostListTests(TestCase): def setUp(self): """ テスト環境の準備用メソッド。名前は必ず「setUp」とすること。 同じテストクラス内で共通で使いたいデータがある場合にここで作成する。 """ post1 = Post.objects.create(title='title1', text='text1') post2 = Post.objects.create(title='title2', text='text2') def test_get(self): """GET メソッドでアクセスしてステータスコード200を返されることを確認""" response = self.client.get(reverse('blog:post_list')) self.assertEqual(response.status_code, 200) def test_get_2posts_by_list(self): """GET でアクセス時に、setUp メソッドで追加した 2件追加が返されることを確認""" response = self.client.get(reverse('blog:post_list')) self.assertEqual(response.status_code, 200) self.assertQuerysetEqual( # Postモデルでは __str__ の結果としてタイトルを返す設定なので、返されるタイトルが投稿通りになっているかを確認 response.context['post_list'], ['<Post: title1>', '<Post: title2>'], ordered = False # 順序は無視するよう指定 ) self.assertContains(response, 'title1') # html 内に post1 の title が含まれていることを確認 self.assertContains(response, 'title2') # html 内に post2 の title が含まれていることを確認 def tearDown(self): """ setUp で追加したデータを消す、掃除用メソッド。 create とはなっているがメソッド名を「tearDown」とすることで setUp と逆の処理を行ってくれる=消してくれる。 """ post1 = Post.objects.create(title='title1', text='text1') post2 = Post.objects.create(title='title2', text='text2') class PostCreateTests(TestCase): """PostCreateビューのテストクラス.""" def test_get(self): """GET メソッドでアクセスしてステータスコード200を返されることを確認""" response = self.client.get(reverse('blog:post_create')) self.assertEqual(response.status_code, 200) def test_post_with_data(self): """適当なデータで POST すると、成功してリダイレクトされることを確認""" data = { 'title': 'test_title', 'text': 'test_text', } response = self.client.post(reverse('blog:post_create'), data=data) self.assertEqual(response.status_code, 302) def test_post_null(self): """空のデータで POST を行うとリダイレクトも無く 200 だけ返されることを確認""" data = {} response = self.client.post(reverse('blog:post_create'), data=data) self.assertEqual(response.status_code, 200) class PostDetailTests(TestCase): # 追加 """PostDetailView のテストクラス""" def 
test_not_fount_pk_get(self): """記事を登録せず、空の状態で存在しない記事のプライマリキーでアクセスした時に 404 が返されることを確認""" response = self.client.get( reverse('blog:post_detail', kwargs={'pk': 1}), ) self.assertEqual(response.status_code, 404) def test_get(self): """GET メソッドでアクセスしてステータスコード200を返されることを確認""" post = Post.objects.create(title='test_title', text='test_text') response = self.client.get( reverse('blog:post_detail', kwargs={'pk': post.pk}), ) self.assertEqual(response.status_code, 200) self.assertContains(response, post.title) self.assertContains(response, post.text) class PostUpdateTests(TestCase): # 追加 """PostUpdateView のテストクラス""" def test_not_fount_pk_get(self): """記事を登録せず、空の状態で存在しない記事のプライマリキーでアクセスした時に 404 が返されることを確認""" response = self.client.get( reverse('blog:post_update', kwargs={'pk': 1}), ) self.assertEqual(response.status_code, 404) def test_get(self): """GET メソッドでアクセスしてステータスコード200を返されることを確認""" post = Post.objects.create(title='test_title', text='test_text') response = self.client.get( reverse('blog:post_update', kwargs={'pk': post.pk}), ) self.assertEqual(response.status_code, 200) self.assertContains(response, post.title) self.assertContains(response, post.text) class PostDeleteTests(TestCase): # 追加 """PostDeleteView のテストクラス""" def test_not_fount_pk_get(self): """記事を登録せず、空の状態で存在しない記事のプライマリキーでアクセスした時に 404 が返されることを確認""" response = self.client.get( reverse('blog:post_delete', kwargs={'pk': 1}), ) self.assertEqual(response.status_code, 404) def test_get(self): """GET メソッドでアクセスしてステータスコード200を返されることを確認""" post = Post.objects.create(title='test_title', text='test_text') response = self.client.get( reverse('blog:post_delete', kwargs={'pk': post.pk}), ) self.assertEqual(response.status_code, 200) self.assertContains(response, post.title) self.assertContains(response, post.text)
[ "jiv.yu1.kawasaki@gmail.com" ]
jiv.yu1.kawasaki@gmail.com
8c4e0732907c0a50c71b4fd46d7db075c8ad46a5
760fbdca58de7e2fb146ec60905ded7497b1812b
/ibm_whcs_sdk/insights_for_medical_literature/tests/integration/test_search_typeahead.py
ee752832dabdf1753e9150dd069ef924defa1b65
[ "Apache-2.0" ]
permissive
dmansjur/whcs-python-sdk
c5d28742cefc65e19a7eb5de0027fe9f59b1e689
110a847c91d5779df91c6562394bde557ee132e5
refs/heads/master
2021-05-26T21:49:44.515561
2020-04-07T17:17:36
2020-04-07T17:17:36
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,587
py
# coding: utf-8 # Copyright 2018 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is an example of invoking the /v1/corpora/{corpus}/search/{corpus}/typeahead REST API # of Insights for Medical Literature. import configparser import ibm_whcs_sdk.insights_for_medical_literature as wh # To access a secure environment additional parameters are needed on the constructor which are listed below CONFIG = configparser.RawConfigParser() CONFIG.read('./ibm_whcs_sdk/insights_for_medical_literature/tests/config.ini') BASE_URL = CONFIG.get('settings', 'base_url') APIKEY = CONFIG.get('settings', 'key') IAMURL = CONFIG.get('settings', 'iam_URL') LEVEL = CONFIG.get('settings', 'logging_level') VERSION = CONFIG.get('settings', 'version') DISABLE_SSL = CONFIG.get('settings', 'disable_ssl') VERSION = CONFIG.get('settings', 'version') CORPUS = CONFIG.get('settings', 'corpus') ONTOLGOY = CONFIG.get('search', 'umls') QUERY = CONFIG.get('search', 'typeahead_query') TYPE = CONFIG.get('search', 'typeahead_type') IML_TEST = wh.InsightsForMedicalLiteratureServiceV1(BASE_URL, APIKEY, IAMURL, VERSION, LEVEL, DISABLE_SSL) # test can only be successful against a custom plan intance def test_search_typeahead(): types = [TYPE] ontologies = [ONTOLGOY] response = IML_TEST.typeahead(corpus=CORPUS, query=QUERY, types=types, category='disorders', verbose=False, limit=10, max_hit_count=1000, no_duplicates=True, ontologies=ontologies) concept_list = 
wh.ConceptListModel._from_dict(response.get_result()) assert concept_list is not None concepts = concept_list.concepts for concept in concepts: assert concept.cui is not None assert concept.ontology is not None def test_search_typeahead_verbose(): types = [TYPE] ontologies = [ONTOLGOY] response = IML_TEST.typeahead(corpus=CORPUS, query=QUERY, types=types, category='disorders', verbose=True, limit=10, max_hit_count=1000, no_duplicates=True, ontologies=ontologies) concept_list = wh.ConceptListModel._from_dict(response.get_result()) assert concept_list is not None concepts = concept_list.concepts for concept in concepts: assert concept.cui is not None assert concept.ontology is not None def test_search_typeahead_no_corpus(): types = [TYPE] ontologies = [ONTOLGOY] try: response = IML_TEST.typeahead(corpus=None, query=QUERY, types=types, category='disorders', verbose=True, limit=10, max_hit_count=1000, no_duplicates=True, ontologies=ontologies) except ValueError as imle: assert imle is not None def test_search_typeahead_verbose_no_query(): types = [TYPE] ontologies = [ONTOLGOY] try: response = IML_TEST.typeahead(corpus=CORPUS, query=None, types=types, category='disorders', verbose=True, limit=10, max_hit_count=1000, no_duplicates=True, ontologies=ontologies) except ValueError as imle: assert imle is not None
[ "dcweber@us.ibm.com" ]
dcweber@us.ibm.com
72627daf23c16323fb9cc7908b77948306504c43
dee83fb6b0bd496b01dd0c216ee6e1a62012abe3
/users/views.py
ddbf99c3ca818a3722abba0e5c78294208027386
[]
no_license
RJustinePanopio/ems_proj
82f56112ba3abf9e5548376b4deed92db04c0d47
4ea38eebc1fa4738c845570e96f8055238f2ed4a
refs/heads/master
2020-04-04T18:00:33.016146
2018-11-08T04:04:53
2018-11-08T04:04:53
156,146,279
0
0
null
null
null
null
UTF-8
Python
false
false
311
py
from django.shortcuts import render from django.urls import reverse_lazy from django.views.generic import CreateView from .forms import ParticipantCreationForm class SignUpView(CreateView): form_class = ParticipantCreationForm template_name = "signup.html" success_url = reverse_lazy("login")
[ "20anime15@gmail.com" ]
20anime15@gmail.com
2bd347367d16337128c24e338d3460b3fa8647c9
ebc4394518b59bab29fe23452c23918f3b3d93b7
/agriculture/crop-yields/plot_topography.py
f950da826979b1eaf89377b64a22ee42afcaa3c5
[ "MIT" ]
permissive
gkilleen33/rs-economics
a1673f041d66cf04db0d38940da230def05edeb8
4b1149925695c9fe801d22912d28a4c022ba9079
refs/heads/master
2021-07-05T05:16:12.753945
2021-06-12T18:49:22
2021-06-12T18:49:22
243,337,981
3
1
MIT
2021-03-20T22:55:33
2020-02-26T18:40:34
Python
UTF-8
Python
false
false
2,283
py
# -*- coding: utf-8 -*- import ee ee.Initialize() ############################################################### # ENTER USER INPUTS HERE ############################################################### plot_boundaries = ee.FeatureCollection() # Upload plot boundary data (e.g. using the Google Earth Engine console) and insert the asset ID here, in single or double quotes # Export information (to Google Drive) output_folder = 'EXAMPLE_FOLDER' # Folder name to save outputs in Google drive. The folder should be created before running the script. output_file = 'EXAMPLE_FILE_NAME' # Output file name ############################################################## # END USER INPUTS ############################################################## #Construct AOI aoi = plot_boundaries.geometry().bounds() # Import elevation data elevation = ee.Image('USGS/SRTMGL1_003').select('elevation').clip(aoi) # Store the native resolution of the SRTM data srtm_scale = elevation.projection().nominalScale().getInfo() # Calculate slope and aspect terrain = ee.Terrain.products(elevation) # If the scale of the smallest item in 'plot_boundaries' is less than the native resolution of the GPM data, then set the variable scale to the min scale of polygons, otherwise set it to the native resolution def addArea(feature): return feature.set({'area': feature.geometry().area()}) polygons_with_area = plot_boundaries.map(addArea) min_polygon_area = polygons_with_area.reduceColumns(reducer=ee.Reducer.min(), selectors=['area']).getInfo()['min'] if min_polygon_area < (gpm_scale)**2: scale = max(10, math.sqrt(min_polygon_area)) # In case there are any polygons with very small area else: scale = gpm_scale # Calculate zonal stats zonal_stats = terrain.reduceRegions(reducer=ee.Reducer.mean(), collection=plot_boundaries, scale=scale, tileScale=4) # Remove geometry def removeGeometry(feature): return feature.setGeometry(None) zonal_stats_no_geometry = zonal_stats.map(removeGeometry) # Export the data to Google 
Drive task = ee.batch.Export.table.toDrive(collection=zonal_stats_no_geometry, description=output_file, fileFormat='CSV', fileNamePrefix=output_file, folder=output_folder) task.start()
[ "gradykilleen@gmail.com" ]
gradykilleen@gmail.com
399c43ffe2bee74fd813a3ea733be816e7ba391b
f6f8323999c8ec13e14f9354a64f9a393f7ec032
/volc/volc.py
2c14c090661353f004dce8a2ee943c14611263a2
[]
no_license
jrybicki-jsc/rusty
8a3b0776428eca61f9a82af139df0bae7aedb923
d6e1ef23f3abc2ca366d9f64e57efaff601eabbd
refs/heads/master
2022-12-19T06:35:35.765175
2020-10-03T08:32:31
2020-10-03T08:32:31
267,676,930
0
0
null
null
null
null
UTF-8
Python
false
false
2,842
py
import sys import math import random import pygame as pg pg.init() BLACK = (0, 0, 0) WHITE = (255, 255, 255) LT_GRAY = (180, 180, 180) GRAY = (120, 120, 120) DK_GRAY = (80, 80, 80) class Particle(pg.sprite.Sprite): gases_colors = {'SO2': LT_GRAY, 'CO2': GRAY, 'H2S': DK_GRAY, 'H2O': WHITE} VENT_LOCATION = (320, 300) IO_SURFACE_Y = 308 VELOCITY_SO2 = 8 GRAVITY = 0.5 vel_scalar = {'SO2': 1, 'CO2': 1.45, 'H2S': 1.9, 'H2O': 3.6 } def __init__(self, screen, background): super().__init__() self.screen = screen self.background = background self.image = pg.Surface((4, 4)) self.rect = self.image.get_rect() self.gas = random.choice(list(Particle.gases_colors.keys())) self.gas = 'H2S' self.color = Particle.gases_colors[self.gas] self.vel = Particle.VELOCITY_SO2 * Particle.vel_scalar[self.gas] self.x, self.y = Particle.VENT_LOCATION self.vector() def vector(self): orient = random.uniform(60, 120) radians = math.radians(orient) self.dx = self.vel * math.cos(radians) self.dy = -self.vel * math.sin(radians) def update(self): self.dy += Particle.GRAVITY pg.draw.line(self.background, self.color, (self.x, self.y), (self.x + self.dx, self.y + self.dy)) r, g, b = self.color if (max(self.color) < 255): self.color = (r+5, g+5, b+5) self.x += self.dx self.y += self.dy if self.x < 0 or self.x > self.screen.get_width(): self.kill() if self.y < 0 or self.y > Particle.IO_SURFACE_Y: self.kill() def main(): screen = pg.display.set_mode((639, 360)) pg.display.set_caption('Io Volcano Simulator') background = pg.image.load('tvashtar_plume.gif') screen.blit(background, (0, 0)) legend_font = pg.font.SysFont('None', 24) water_label = legend_font.render('---H2O', True, WHITE, BLACK) h2s_label = legend_font.render('---H2S', True, DK_GRAY, BLACK) co2_label = legend_font.render('---CO2', True, GRAY, BLACK) so2_label = legend_font.render('---SO2/S2', True, LT_GRAY, BLACK) particles = pg.sprite.Group() clock = pg.time.Clock() while True: clock.tick(25) particles.add(Particle(screen, background)) for event 
in pg.event.get(): if event.type == pg.QUIT: pg.quit() sys.exit() screen.blit(background, (0, 0)) screen.blit(water_label, (40, 20)) screen.blit(h2s_label, (40, 40)) screen.blit(co2_label, (40, 60)) screen.blit(so2_label, (40, 80)) particles.update() particles.draw(screen) pg.display.flip() if __name__ == "__main__": main()
[ "j.rybicki@fz-juelich.de" ]
j.rybicki@fz-juelich.de
49c119a4c7491a7b5b8bcf0c18b0dbbd7e0c9b34
19ac8aa8ee916cef99ddc85b6565c4d6fbe40749
/FunctionsAndFunctionalProgramming/functionalFizzing.py
953c863c737f9d99921591c2b75d1cc537db621e
[]
no_license
Darrenrodricks/IntermediatePythonNanodegree
53570bb1b97d9d10d6e6bd19d3a1f8f654a1cfe9
5e597fbe147c23b694fc9b354797e443f0a87a67
refs/heads/main
2023-06-25T18:54:38.962503
2021-07-28T17:15:44
2021-07-28T17:15:44
389,006,852
0
0
null
null
null
null
UTF-8
Python
false
false
313
py
import helper """Generate an infinite stream of successively larger random lists.""" def generate_cases(): a = 0 while True: yield helper.random_list(a) a += 1 if __name__ == '__main__': for case in generate_cases(): if len(case) > 10: break print(case)
[ "noreply@github.com" ]
noreply@github.com
0eb114fa558bb9e5f5e7d4a9501ddd53fa14a7b6
a0dde47bec13e16d9e39ff6a5d321770d6fa8c80
/app/conf/index.py
d31cac1eb0bb44104073fe9304d41982934896cb
[]
no_license
siddharthpanda99/FlaskService
4ed397155ee1be1f8ce681b333d0291eed2c3cd0
c93414eb4a77239802c0f662df7602ad02a6e7de
refs/heads/master
2020-06-14T15:39:55.028554
2017-07-29T03:26:40
2017-07-29T03:26:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,209
py
from __future__ import print_function from __future__ import unicode_literals from flask_cors import CORS from flask_httpauth import HTTPBasicAuth from auth import configure_auth from cache import configure_cache from mongo import configure_mongo from pubsub import configure_kafka from subscribers import register_subscribers from apis import load_apis from models import load_models from utils.mission import * # configure query app def configure_app(app, config): CORS(app) # cross domain tools = { 'auth': configure_auth(app, config), 'cache': configure_cache(app, config) } if config.getboolean('hippo', 'mongo'): print('connect MongoDB...') tools['mongo'] = configure_mongo(config) if config.getboolean('hippo', 'pubsub'): print('connect Kafka...') tools['pubsub'] = configure_kafka(config) models = load_models(config, tools) apis = load_apis(config, tools, models) if 'pubsub' in tools: print('register Subscribers...') register_subscribers(config, tools, models) if config.getboolean('hippo', 'refresh_data'): init_tasks(models) init_events(models) return tools, apis
[ "b96705008@gmail.com" ]
b96705008@gmail.com
29c5c69336a38d9dcf6b6df89d61cecd8ee4bc24
2a1885f175d863e425682e4fd697fe2a1bdfb2a1
/memberactivityapp/migrations/0001_initial.py
3d680bf25b4f0bbff2c202bfb9009133269ea547
[]
no_license
Anshum4512501/fullthrottle_work
133a7da214e7c80e690fb50c3af7c74f57c579ad
8a076b7c61377e585f06bd314bccd9eb476c87d8
refs/heads/master
2022-11-28T23:36:54.771044
2020-08-12T10:04:04
2020-08-12T10:04:04
286,205,623
0
0
null
null
null
null
UTF-8
Python
false
false
1,090
py
# Generated by Django 3.1 on 2020-08-08 11:26 from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Members', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('real_name', models.CharField(max_length=20, null=True)), ('tz', models.CharField(blank=True, max_length=100, null=True)), ], ), migrations.CreateModel( name='Activity_Periods', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('start_time', models.DateTimeField()), ('end_time', models.DateTimeField()), ('members', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='memberactivityapp.members')), ], ), ]
[ "anshum45@gmail.com" ]
anshum45@gmail.com
d5f638b16f492c7594a3fcea541c45e8aae9fab2
2435099201902a12689621baba62f7799a260ae3
/backend/red_frost_25038/urls.py
990009da86771f717c4f523d324c2d500dcb88a5
[]
no_license
crowdbotics-apps/red-frost-25038
eab0bada99927f8f7d76f4866bbcf042be762a0d
cfb48c84f707a558d0cf6405f5057371bdcb2778
refs/heads/master
2023-03-30T10:07:45.116090
2021-03-15T15:46:25
2021-03-15T15:46:25
348,029,901
0
0
null
null
null
null
UTF-8
Python
false
false
2,217
py
"""red_frost_25038 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include, re_path from django.views.generic.base import TemplateView from allauth.account.views import confirm_email from rest_framework import permissions from drf_yasg.views import get_schema_view from drf_yasg import openapi urlpatterns = [ path("", include("home.urls")), path("accounts/", include("allauth.urls")), path("modules/", include("modules.urls")), path("api/v1/", include("home.api.v1.urls")), path("admin/", admin.site.urls), path("users/", include("users.urls", namespace="users")), path("rest-auth/", include("rest_auth.urls")), # Override email confirm to use allauth's HTML view instead of rest_auth's API view path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email), path("rest-auth/registration/", include("rest_auth.registration.urls")), ] admin.site.site_header = "Red Frost" admin.site.site_title = "Red Frost Admin Portal" admin.site.index_title = "Red Frost Admin" # swagger api_info = openapi.Info( title="Red Frost API", default_version="v1", description="API documentation for Red Frost App", ) schema_view = get_schema_view( api_info, public=True, permission_classes=(permissions.IsAuthenticated,), ) urlpatterns += [ path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs") ] urlpatterns += [path("", 
TemplateView.as_view(template_name='index.html'))] urlpatterns += [re_path(r"^(?:.*)/?$", TemplateView.as_view(template_name='index.html'))]
[ "team@crowdbotics.com" ]
team@crowdbotics.com
d7b6cc4e3530d46233da220faa7ff26f21edad7d
c761eca92ffd77f0473d5b8a211167965531f662
/redash/__init__.py
89c355dba1073b93ee4027093d3cbca47d0b076a
[ "LicenseRef-scancode-unknown-license-reference", "BSD-2-Clause" ]
permissive
solutionrooms/testredash
777cd1a7e24abcbf9de866e34f36d3530f1b68bd
8decda0b55defd24fa627b24e7c5fcd377062617
refs/heads/master
2021-01-20T11:14:31.950310
2016-06-23T21:49:08
2016-06-23T21:49:08
61,838,772
0
0
null
null
null
null
UTF-8
Python
false
false
3,948
py
import logging import urlparse import redis from flask import Flask from flask_sslify import SSLify from werkzeug.contrib.fixers import ProxyFix from werkzeug.routing import BaseConverter, ValidationError from statsd import StatsClient from flask_mail import Mail from redash import settings from redash.query_runner import import_query_runners from redash.destinations import import_destinations __version__ = '0.11.0+b1922' if settings.FEATURE_TABLES_PERMISSIONS: # TODO(@arikfr): remove this warning on next version release print "You have table based permissions enabled, but this feature was removed." print "Please use new data sources based permission model." print "(re:dash won't load until you turn off this feature)" exit(1) def setup_logging(): handler = logging.StreamHandler() formatter = logging.Formatter('[%(asctime)s][PID:%(process)d][%(levelname)s][%(name)s] %(message)s') handler.setFormatter(formatter) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(settings.LOG_LEVEL) logging.getLogger("passlib").setLevel("ERROR") def create_redis_connection(): redis_url = urlparse.urlparse(settings.REDIS_URL) if redis_url.scheme == 'redis+socket': qs = urlparse.parse_qs(redis_url.query) if 'virtual_host' in qs: db = qs['virtual_host'][0] else: db = 0 r = redis.StrictRedis(unix_socket_path=redis_url.path, db=db) else: if redis_url.path: redis_db = redis_url.path[1] else: redis_db = 0 r = redis.StrictRedis(host=redis_url.hostname, port=redis_url.port, db=redis_db, password=redis_url.password) return r setup_logging() redis_connection = create_redis_connection() mail = Mail() mail.init_mail(settings.all_settings()) statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX) import_query_runners(settings.QUERY_RUNNERS) import_destinations(settings.DESTINATIONS) from redash.version_check import reset_new_version_status reset_new_version_status() class SlugConverter(BaseConverter): def to_python(self, 
value): # This is an ugly workaround for when we enable multi-org and some files are being called by the index rule: if value in ('google_login.png', 'favicon.ico', 'robots.txt', 'views'): raise ValidationError() return value def to_url(self, value): return value def create_app(): from redash import handlers from redash.admin import init_admin from redash.models import db from redash.authentication import setup_authentication from redash.metrics.request import provision_app app = Flask(__name__, template_folder=settings.STATIC_ASSETS_PATHS[-1], static_folder=settings.STATIC_ASSETS_PATHS[-1], static_path='/static') # Make sure we get the right referral address even behind proxies like nginx. app.wsgi_app = ProxyFix(app.wsgi_app, settings.PROXIES_COUNT) app.url_map.converters['org_slug'] = SlugConverter if settings.ENFORCE_HTTPS: SSLify(app, skips=['ping']) if settings.SENTRY_DSN: from raven.contrib.flask import Sentry from raven.handlers.logging import SentryHandler sentry = Sentry(app, dsn=settings.SENTRY_DSN) sentry.client.release = __version__ sentry_handler = SentryHandler(settings.SENTRY_DSN) sentry_handler.setLevel(logging.ERROR) logging.getLogger().addHandler(sentry_handler) # configure our database settings.DATABASE_CONFIG.update({'threadlocals': True}) app.config['DATABASE'] = settings.DATABASE_CONFIG app.config.update(settings.all_settings()) provision_app(app) init_admin(app) db.init_app(app) mail.init_app(app) setup_authentication(app) handlers.init_app(app) return app
[ "jon.scott@solutionrooms.com" ]
jon.scott@solutionrooms.com
4dfd9b395dbd48e56d02530c1187d3870308bd63
39f2d8e5e16cdb173e0c46c79fadee87db040cb3
/spiderDigikeyMain.py
a2728dcd8e11cb264a3aa603fb266c31d6b61378
[]
no_license
afrunk/spiderClock
3345723c2604cfedf6d6adae97e86c405728012b
e046c2477c27e1ce772b9e6d5c169e1973c9e28f
refs/heads/master
2020-07-06T19:46:08.025145
2019-08-27T14:18:19
2019-08-27T14:18:19
203,121,676
0
0
null
null
null
null
UTF-8
Python
false
false
23,871
py
# 爬取的时候需要修改的地方 ''' - 表名: - get_html_sensor() 函数中更新链接状态 - read_all_urls() 获取链接存入数据 - get_images_new() 获取图片链接 ''' import requests from bs4 import BeautifulSoup import pymysql import os import time # 链接数据库 切换时候修改db即可 db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='password', db='digikeydata', charset='utf8') cursor = db.cursor() # 请求头部 headers ={ 'authority':'www.digikey.com', 'method':'GET', 'path':'/products/en/audio-products/accessories/159', 'scheme':'https', 'https':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3', 'accept-encoding':'gzip, deflate, br', 'accept-language':'zh-HK,zh;q=0.9,zh-CN;q=0.8,en;q=0.7,en-US;q=0.6,zh-TW;q=0.5', 'cache-control':'max-age=0', 'cookie':"instart_html_streaming=false; instart_reload_reason=10; instart_reload_additional_info=Intercepted function: querySelector On: [object HTMLDocument] of type: #document with selectors: div.homepage-featured__content > div.featured-content:last-child; csscxt=399247882.20480.0000; dtCookie=E593B64A943DF4BCEC8334D12DCE8769|X2RlZmF1bHR8MQ; TS01cecf1b=01460246b6c2e35404c26799fba2a10822329b78e13676af38990f16ea078c5c99fb386e9a029d91a2397ea30cb5d5fe62385115a4; TS0138bc45=01460246b6c67ca8f8d8a1bf49ea3d6b00adb9f25d7d40b542edf7c7e353f23fc2ee2b03440552ec11384e0eea1e2b13170be06ab7; pf-accept-language=en-US; ping-accept-language=en-US; TS0184e6b9=01460246b63256f943044cc114a6b76032d812898d9355edde17f55a219c1a72a0d98bb4d39176c46b207b4f2f0431265d08c248f1; i10c.ss=1566047760615; i10c.uid=1566047760617:1662; optimizelyEndUserId=oeu1566047761611r0.9361870224501541; EG-U-ID=D8ae9e2fda-fe35-481c-aadf-d8caf7926d1c; EG-S-ID=E861866b1c-ad90-49ee-ab82-367ee49fcb6c; _ga=GA1.2.2085766613.1566048334; _gid=GA1.2.310825462.1566048334; _msuuid_27490huz44870=44703864-FB2B-46FD-BB87-F79ECC5276D9; _evga_8774=3a916442c95712cc.; 
TS017613a9=01460246b625607c93ebeea121aa44b919377e5316c586db54cd585a8211b120c38a8ffdcd138425e7c8fe1a1bb0907e70062bcfbd; WRIgnore=true; TS013c3a0b=01460246b64198bd3f88181bd3a285e5cc9b890647a041ee5e3628871d42945519c9be001f7f567a3df7d4a461e63c14ce29ce0315; TS018060f7=01460246b68cd723277b854f22d3b68024f139adb71fc769c679e3b46b517ea1998a19d1348966a5fc96fcc4f583ba562d444eef9e; _aa7988=1x9a62; utag_main=v_id:016c9fb89b59001609e768ce2cc803073006206b00bd0$_sn:2$_ss:0$_st:1566056404179$ses_id:1566054142565%3Bexp-session$_pn:2%3Bexp-session; website#lang=; TS01d239f3=01460246b619af0364be922e0e3e45fc7326eaf281031c7f705e6aab6c6f062b13012669c1e19c75abc7c03afddb3be01e5458304c; TS01d5128f=01460246b60fe611ec18383c61ab63da1cec4e0a0f21627d14e8a2c059bf7575702c9322029da4c4ae2651fd0ce7479e1f978fa3b3; _gat_Production=1; QSI_HistorySession=https%3A%2F%2Fwww.digikey.com%2Fproducts%2Fen%2Fintegrated-circuits-ics%2Fclock-timing-clock-buffers-drivers%2F764%2Fpage%2F2~1566048340828%7Chttps%3A%2F%2Fwww.digikey.com%2Fproducts%2Fen%2Faudio-products%2Faccessories%2F159~1566049192229%7Chttps%3A%2F%2Fwww.digikey.com%2Fproducts%2Fen~1566054611170; __CT_Data=gpv=4&ckp=tld&dm=digikey.com&apv_53368_www=8&cpv_53368_www=4; utm_data_x=part_family%3DAccessories%2Chtml_element1%3Dcatfilterlink%2Chtml_element2%3D%2Chtml_element3%3Dcatfiltersub%2Cref_page_type%3DPS%2Cref_page_sub_type%3DCAT%2Cref_page_id%3DCAT%2Cref_page_event%3DSelect%20Family%2Chtml_element4%3DproductIndexList%2Cundefined%3Dcontent-container%2CExtRun%3D409.1%2Cccookie%3D2019-08-17T13%3A25%3A33.869Z%2CExtRun%3D409.1%7C428.1%7C357.5; ctm={'pgv':7574547137797923|'vst':2385523260024033|'vstr':4532683176955253|'intr':1566054655450|'v':1|'lvst':53}; i10c.uservisit=17; 
i10c.bdddb=c2-e665bBx4YeKyjjpB3dTkLGukvWtNTo20TLztSTnuSVwL1xuo56foG0jkv6tMv5vYDDvpPSGkO8sM36oGYzYkn8epvRzuxs55VDvPPRi1bSsMt3RFt48fL0r2qWyHwR5vUsqpUozFJXsHzarByeTkLAOehGaHvtx12Gqp4MiplheEjtoGyzZIMvjPqWyjARsfPIvkVzlnPdzHycoGyOgfL0eqTUtMVo20pbqpUMjNQdnMYxtGKprWtbepvRzuto2aPIvE6HdpOStut7SBy4yukG6GqWyHwRx0UIqp7QdpOSswt2tWIITkLvkNqWyKqtfyPIvkU1dpOq8hGxtGt56fL0hkv9wHvtx55DvPPRi8A5TyZxtGt56iG0JkvWOWqt2vVqqpUMlONatQt2TF19bfLaittZtMVs61VDvPTVmnJXSL29sByeXpL6epVV3P0o2aTN1wPRIoUWvH1doGyzYKG0jHBRyMquazZDvPPRiGDPiDt2tBzcWfLaepvgh2Vbx0UDwNSMiPJXsl8xtGt56iG0JkvWNjqt2vVqqpUMiqJYRHyeoGyzdL", 'referer':'https://www.digikey.com/products/en/audio-products/accessories/159', 'sec-fetch-mode':'navigate', 'sec-fetch-site':'same-origin', 'upgrade-insecure-requests':'1', 'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36' } # 写入数据库操作 def write_database_operation(tdsList,itemAll): # for i in tdsList: # print(i) print(len(tdsList)) # print(itemAll) #mysql存储过程中用变量做表名 print("成功链接数据库") try: sql = """ INSERT IGNORE INTO NewData (Kind,PaKind,infourl,imageUrl,imagesPath,diginum,ManufacturePartNumber,Manufacturer,Description,QuantityAvailable,price,minmumQuantity,Series,ParaData) VALUES('{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}')"""\ .format(tdsList[0],tdsList[1],tdsList[2],tdsList[3],tdsList[4],tdsList[5],tdsList[6],tdsList[7],tdsList[8],tdsList[9],tdsList[10],tdsList[11],tdsList[12],itemAll) print(sql) cursor.execute(sql) # 执行命令 db.commit() # 提交事务 except: pass # 生成随机字符串为图片命名 # 直接放弃了 使用id作为图片命名 # def create_random(): # # 生成随机图片名 # import random, string # s = string.ascii_letters + string.digits # print(s) # # n = [''.join(random.choices(s, k=12)) for I in range(60000)] #存储随机生成字符串的列表 # # print(n) # return n # 废弃 # 下载图片和返回给数据库一个图片的路径 # 传入大类目的名字 因为导出使用的是大类目 后续使用大类目作为文件夹名 # 图片的名字使用十二位随机字符串 # 访问速度 所以使用的新函数 # def get_images_No(folderName,imageUrl): # # 随机生成字符串 # n =create_random() # # 保存文件夹的名字即我们的大目录名 
# daleimu =folderName # # 是否有图片 如果没有图片使用统一的默认无图片路径 如果有的话在爬取和存储 # if imageUrl == 'photo not available': # filename = 'pna_en.jpg' # else: # # respone = requests.get('https://'+imageUrl,headers=headers) # dirname = daleimu # if os.path.exists(daleimu) == False: # os.makedirs(daleimu) #需要在爬取一个新的类目时即创建文件夹 # filename = dirname +'/'+ n[0]+'.JPG' # print(filename) # # fp = open(filename,'wb') # # fp.write(respone.content) # # fp.close() # return filename navigationBars = [] # 原属性栏列表 navigationBarsReally = [0,0,0,0,0] # 处理后属性栏列表 因为我们需要为前面的三个选项赋值 所以必须先定义 # 将页面的导航栏删除掉不需要的 以及将其处理成我们存入数据库的格式 def deal_navigation(navigationBars): # 处理导航栏 item = '' navigationBars.pop(0) navigationBars.pop(0) navigationBars[6] = 'price' # print(navigationBars) # 未导航栏中没有但是数据库中有的栏目添加进列表前面 navigationBarsReally[0] = 'Kine' # 大类目 navigationBarsReally[1] = 'MinKine' # 小类目 navigationBarsReally[2] = 'infoUrl' # 详情页链接 navigationBarsReally[4] = 'imagesPath' #图片路径 navigationBarsReally[3] = 'imageUrl' for i in range(1,len(navigationBars)): # image 的 td不能删除 所以在这里直接掠过1 if i < 9: # 将不变的导航栏写入新的列表 navigationBarsReally.append(navigationBars[i]) else: # print(navigationBars[i]) stritem = navigationBars[i] item += "<|>" + navigationBars[i] # print(navigationBarsReally) # print(item) # 将处理好的导航栏属性 返回 return navigationBarsReally,item # 请求html文件 def get_html(url): content = requests.get(url, headers=headers) soup = BeautifulSoup(content.text, 'lxml') return soup # 提取HTML的具体信息 处理后存入列表供写入数据库的函数调用 def get_html_sensor(url): print("正在爬取链接是:"+url) soup=get_html(url) # 匹配类目 hFirst = soup.find('h1').get_text() # print(hFirst) # print(hFirst.split(">")) hFirstList = hFirst.split(" > ") hFirstList.pop(0) # 分隔符提取出来选取后面两个属性 # print(hFirstList) # 匹配导航栏 tblheads = soup.find('thead') # print(tblheads) tr = tblheads.find('tr') ths = tr.find_all('th') for th in ths: # print(th.string) navigationBar =th.get_text().strip().strip() navigationBars.append(navigationBar) # print(navigationBar) 
navigationBarsReally,item=deal_navigation(navigationBars) # 提取具体的信息 : 将数据处理成我们所需要的格式 tbody = soup.find('tbody',id='lnkPart') itemscopes = tbody.find_all('tr')#获取每个商品的具体tr for itemscope in itemscopes: tds = itemscope.find_all('td')#获取每个属性的td # 删除掉导航栏的无用信息 tds.pop(0) tds.pop(0) # 为了下一个商品的信息可以正常存入列表和字符串中,我们需要在下一次迭代前进行一个更新 tdsList = [0,0] # 存储具体信息的列表 方便写入数据库 因为我们直接赋值两个元素 所以需要先自定义 类似navigationBarsReally 的定义 tdsItems = '' # 存储具体的信息的字符串 方便写入数据库 imagePath = '' # 将前面提取的类目存入tdsList tdsList[0] = hFirstList[0] # 大类目 tdsList[1] = hFirstList[1] # 小类目 # 详情页 图片路径 id Manufacturer Part Number Manufacturer Description Quantity Available price Minimum Quantity Series for i in range(len(tds)): # 获取图片的下载链接、商品的具体链接、以及添加商品的保存路径 # print(tdsList) if i == 0: try: # 提取图片的链接和具体商品的详情页存入到列表中 infoUrl = 'https://www.digikey.com' + tds[i].find('a').get('href') tdsList.append(infoUrl) # print('InfoUrl 是否被抓取到 '+ infoUrl) # 添加try 中 如果没有访问到 imgUrl 之后跳转到 except imgUrl = tds[i].find('img', 'pszoomer').get('zoomimg').replace('//', '') tdsList.append(imgUrl) # print("当前是否访问到imgUrl") # print(imgUrl) # print("访问到了!") # 下载图片 并将图片的下载路径传入 # imagePath = 'Testpath' # imagePath = get_images(tdsList[0],imgUrl) # # print(imagePath) # tdsList.append(imagePath) except: imgUrl= 'photo not available' tdsList.append(imgUrl) # imagePath = 'Testpath' # tdsList.append(imagePath) elif i == 5 : # print(i) # print(tds[i]) # 使用 replace去除掉html中的空格和回车 try: quantityAvailable = tds[i].find('span','desktop').get_text().replace('\n', ' ').replace('\r', ' ').strip() # print(quantityAvailable) tdsList.append(quantityAvailable) except: quantityAvailable='' tdsList.append(quantityAvailable) elif i == 7 : try: quantityAvailable = tds[i].find('span', 'desktop').get_text().replace('\n', ' ').replace('\t', ' ').replace('\r', ' ').strip() # print(quantityAvailable) tdsList.append(quantityAvailable) except: quantityAvailable = '' tdsList.append(quantityAvailable) elif 0 < i < 9 and i != 5 : tdsContent = 
tds[i].get_text().strip().strip().replace('Available:','').replace('Minimum:','') # print(tdsContent) tdsList.append(tdsContent) else: tdsContent = tds[i].get_text().strip().strip().replace('Available:', '').replace('Minimum:', '') # print(tdsContent) tdsItems += '<|>' + tdsContent # 图片的文件名为 ID + 'jpg' # 如果没有图片的话就直接保存 Test.jpg 文件 if imgUrl == 'photo not available': imagePath = 'Test.jpg' print(imagePath) tdsList.insert(4, imagePath) else: imagePath =tdsList[0]+'/'+tdsList[1]+'/'+tdsList[4] +'.JPG' print(imagePath) tdsList.insert(4,imagePath) print(navigationBarsReally) print(tdsList) # 存入到数据库的各个属性值 # print(item) # print(tdsItems) itemAll = item +' '+tdsItems # print(itemAll) # 存入到数据库的ParaData 即其他参数 # print('*****************************') # 调用写入数据库的函数将值传入数据库 # 必须在数据更新前进行写入操作 write_database_operation(tdsList,itemAll) sql = "update urlsOther set isOrNoGot = 1 where url = '%s'" % (url) # 如果已经被爬取过则修改 isOrNoGot 为 1 try: cursor.execute(sql) # 执行命令 db.commit() # 提交事务 except: db.rollback() # 回滚 # 测试是否已经更改 # sql = "select * from urlsOther where url = '%s'" % (url) # try: # cursor.execute(sql) # # print(cursor(0)) 不能使用这样子的方法来读取cursor() # for i in cursor: # print("更新后的数据:") # print(i) # except: # pass # 测试是否可以更新链接是否被爬取标记 def test_sql(): # 当前页的爬取完成之后我需要进入表中更新当前链接的获取属性 url = 'https://www.digikey.com/products/en/crystals-oscillators-resonators/crystals/171/page/101?FV=ffe001dd&quantity=0&ColumnSort=0&page=101&pageSize=500' sql= "update urlsFirst set isOrNoGot = 1 where url = '%s'" %(url) # 如果已经被爬取过则修改 isOrNoGot 为 1 try: cursor.execute(sql) # 执行命令 db.commit() # 提交事务 except: db.rollback() # 回滚 # 测试是否已经更改 sql = "select * from urlsFirst where url = '%s'" %(url) try: cursor.execute(sql) # print(cursor(0)) 不能使用这样子的方法来读取cursor() for i in cursor: print("更新后的数据:") print(i) except: pass # 获取链接函数 # 获取所有需要爬取的具体页面的链接存入txt文件 # 分为三个文件存储 即客户说的分三个数据库存储文件 def get_all_urls(url): # start = time.process_time() # 开始时间 htmlContent = get_html(url) ulsList = htmlContent.find_all('a', 
class_='catfilterlink') print(ulsList) # 最后一个是原链接 去掉 ulsList.pop(-1) urlsList = [] # 存储访问页面的url for url in ulsList: urlProducts = 'https://www.digikey.com' + url.get('href') # 构建url使得每次访问的页面都有500个商品 print(urlProducts) urlsList.append(urlProducts) # with open('urls.txt', 'a', encoding='utf-8') as f: # f.write(urlProducts + '\n') # f.close() # 遍历首页获取到的所有链接进行插入 for i in range(0, len(urlsList)): # 多表查询 """ # 添加一个判断链接是否已经存在的值 如果已经存在则不不需要进行爬取的下一步 testUrl=urlsList[i]+ '?FV=ffe001dd&quantity=0&ColumnSort=0&page=1&pageSize=500' # testUrl= 'https://www.digikey.com/products/en/audio-products/accessories/159/page/1?FV=ffe001dd&quantity=0&ColumnSort=0&page=1&pageSize=500' print(testUrl) # sql = "select count(*) from urlsFirst where url='%s'" % (testUrl) # 从一个表中查询 sql="select * from urlsFirst where url='%s' union select * from urlsSecond where url='%s' union select * from urlsOther where url='%s'" %(testUrl,testUrl,testUrl) cursor.execute(sql) print(cursor.fetchone()) # print(list(cursor.fetchone()[0])) # 输出查询到的数字的类型 if(cursor.fetchone() == None): print("数据库中已经存在"+urlsList[i]) continue # 使用continue 跳出当前if循环 使用break 可以跳出当前 for 循环 else: """ htmlDetailsPages = get_html( urlsList[i] + '?FV=ffe001dd&quantity=0&ColumnSort=0&page=1&pageSize=500') # 添加后缀来计算500一页的商品有多少页 # 访问页面 查看页数后是否还有数字 span = htmlDetailsPages.find('span', class_='current-page') pages = span.get_text().replace('Page 1/', '').replace(',', '') # 总共有多少页 print(pages) print("原网页" + urlsList[i]) if 'crystals-oscillators-resonators' in urlsList[i] or 'integrated-circuits-ics' in urlsList[i] or 'development-boards-kits-programmers' in urlsList[i] or 'switches' in urlsList[i]: for j in range(1, int(pages) + 1): # 拼凑可以直接访问下一页的链接 newUrl = urlsList[i] + '/page/' + str(j) + '?FV=ffe001dd&quantity=0&ColumnSort=0&page=' + str( j) + '&pageSize=500' urlsList.append(newUrl) print(newUrl) # 第一种保存链接方法 存入本地 txt # with open('urlFirst.txt', 'a', encoding='utf-8') as f: # f.write(newUrl + '\n') # f.close() # 第二种保存链接方法 存入数据库 try: sql = """ 
INSERT IGNORE INTO urlsFirst (url) VALUES('{}')""" \ .format(newUrl) # print(sql) print("正在插入链接到urlsFirst表中:" + newUrl) cursor.execute(sql) except: pass elif 'connectors-interconnects' in urlsList[i] or 'capacitors' in urlsList[i] or 'resistors' in urlsList[i]: for j in range(1, int(pages) + 1): # 拼凑可以直接访问下一页的链接 newUrl = urlsList[i] + '/page/' + str(j) + '?FV=ffe001dd&quantity=0&ColumnSort=0&page=' + str( j) + '&pageSize=500' urlsList.append(newUrl) print(newUrl) # 第一种保存链接方法 存入本地 txt # with open('urlSecond.txt', 'a', encoding='utf-8') as f: # f.write(newUrl + '\n') # f.close() # 第二种保存链接方法 存入数据库 try: sql = """ INSERT IGNORE INTO urlsSecond (url) VALUES('{}')""" \ .format(newUrl) # print(sql) print("正在插入链接到urlsSecond表中:" + newUrl) cursor.execute(sql) except: pass else: for j in range(1, int(pages) + 1): # 拼凑可以直接访问下一页的链接 newUrl = urlsList[i] + '/page/' + str(j) + '?FV=ffe001dd&quantity=0&ColumnSort=0&page=' + str( j) + '&pageSize=500' urlsList.append(newUrl) print(newUrl) # 第一种保存链接方法 存入本地 txt # with open('urlOthers.txt', 'a', encoding='utf-8') as f: # f.write(newUrl + '\n') # f.close() # 第二种保存链接方法 存入数据库 try: sql = """ INSERT IGNORE INTO urlsOther (url) VALUES('{}')""" \ .format(newUrl) # print(sql) print("正在插入链接到urlsOther表中:"+newUrl) cursor.execute(sql) except: pass print(len(urlsList)) # end = time.process_time() # 结束时间 # SpendTime = str(end - start) # 测试整个程序运行的总时长 # with open('urls.txt', 'a', encoding='utf-8') as f: # f.write(SpendTime) # f.close() allUrls = [] # 读取链接函数存入列表方便多线程调用 def read_all_urls(): # 废弃方法 不适合监测爬虫进度 # 读取urls.txt 文件中的所有链接 封装成函数方便使用多线程 # filenames = 'urlsTest.txt' # with open(filenames) as file_object: # for content in file_object: # # print(content.rsplit()) # allUrls.append(content.replace('\n','')) # return allUrls # 读取数据库的表文件 然后判断是否被爬取过 如果爬取过不写入列表 没有则写入 sql = ('select * from urlsOther where isOrNoGot != 1')#表中所有信息读取所有信息 如果 isOrNoGot 不为 1 则获取 # sql = ('select url from urlsFirst where isOrNoGot != 1') # 读取url 的信息 cursor.execute(sql) urls = [] for 
i in cursor: urls.append(i[0]) # 因为我只要第一个键的属性 即 url 所以直接存入列表元素的 0 即可 print(urls) return urls # 开始函数 # 使用多线程来测试反爬对于爬取速度的限制为多少 from multiprocessing import Pool def get_url_content_test(): # # 添加一个检测当前CPU核数的代码 将该数字填入到下面的代码中去实现自动的多线程爬取 # 启动多线程爬取程序 urls = read_all_urls() start_2 = time.time() pool = Pool(processes=1) pool.map(get_html_sensor, urls) end_2 = time.time() print('2进程爬虫耗时:', end_2 - start_2) # start_3 = time.time() # pool = Pool(processes=10) # pool.map(get_html_sensor, urls) # end_3 = time.time() # print('2进程爬虫耗时:', end_3 - start_3) if __name__=='__main__': url= 'https://www.digikey.com/products/en' # 第一步 将所有得链接分类存储和保存在数据库中 # get_all_urls(url) # 测试读取数据库爬取链接代码 # read_all_urls() # 测试更新数据库数据代码 # test_sql() # 第二步 访问分类存储的链接 爬取之后标注为 1 默认值为0 # get_url_content_test() # 第三步 多线程爬取图片存储在分类的文件夹下 # get_images(folderName,imageUrl) url = 'https://www.digikey.com/products/en/computer-equipment/kvm-switches-keyboard-video-mouse-cables/896/page/1?FV=ffe001dd&quantity=0&ColumnSort=0&page=1&pageSize=500' get_html_sensor(url)
[ "afrunk7@gmail.com" ]
afrunk7@gmail.com
251660ad7fcbe5e0382b980fc9d1abb986e30e43
33de81c7970f12926cbc4dc3cce3655898afcd55
/collatz.py
4a6b9437737fe827db6f9916e369ea19762cf5cb
[]
no_license
HazelIP/pands-problem-sheet
b62fcad87315ad5991b8dd1ffe7f9a55f1a6c92e
c13a58fbdb65e4f1e7a2efeef43ff4ba92688f00
refs/heads/main
2023-04-13T15:59:26.968861
2021-04-07T05:20:28
2021-04-07T05:20:28
334,283,014
0
0
null
null
null
null
UTF-8
Python
false
false
1,506
py
# This program ask user to input any positive integer and outputs successive values of the below calculation # At each step calculate the next value by taking the current value # if the input is even divided by 2, if odd x3 +1 # end program while output = 1 # Author: Ka Ling Ip def main(): #create a function called main (ref[4.1]) numIn = int(input("Please enter a positive integer:")) #ask for a positive integer allNum = [] #create a list to store the calculations allNum.append(numIn)# store user input in the list (ref[4.2]) if numIn > 1: #proceed to calculation only when input is a positive integer (ref[4.3]) while numIn != 1: # program ends while output =1 (ref[4.4]) if numIn % 2 == 0: #do the following if user input/successive value is an even number numIn = int(numIn/2) allNum.append(numIn) # put the calculation in the list else: numIn = int(numIn*3+1) #do this if user input is an odd number allNum.append(numIn) # put the calculation in the list print (allNum) #print the list of output elif numIn <= 0: #if user input a negative number prompt again main() main() #call the function #ref[4.1]: https://www.w3schools.com/python/python_functions.asp #ref[4.2]: https://www.w3schools.com/python/python_lists_add.asp #ref[4.3]: https://www.w3schools.com/python/python_conditions.asp #ref[4.4]: https://www.w3schools.com/python/python_while_loops.asp
[ "g00398581@gmit.ie" ]
g00398581@gmit.ie
d920e145be21cf8acf22577c83cf9016cdd89833
e95c988f578064830c34708d11b16b4f0e72d468
/src/calculator.py
00ca1ab20a12d76f12c26c294ae027ff7dfcfd7b
[]
no_license
shishirajm/date_calculator
780b002f0183b7fac3719a5f9c4e669909f347e6
5e0297a02da400ed20701fc26dea658ee79118a4
refs/heads/main
2023-03-11T12:34:15.882215
2021-02-27T18:09:09
2021-02-27T18:09:09
342,792,353
0
0
null
null
null
null
UTF-8
Python
false
false
280
py
from src.date import Date def get_dates_diff(date1, date2): try: log(date1) log(date2) d1 = Date(date1) d2 = Date(date2) return d1 - d2, '' except Exception as e: log(e) return -1, e def log(msg): print(msg)
[ "shishirajm@gmail.com" ]
shishirajm@gmail.com
0a454a6e9c041f2defd78711ff6565bca268f9b5
f9c16bd4969d10f5f860205dcab5035f075209b8
/pigat/spiders/pigat_cms.py
69f32d5b523516e7007243342e349e05a4cd4963
[]
no_license
cracer/pigat
3ec3c64560846032e0f8d1645e21a9c1e99dc33d
9a9082db8bd7f79d8960bc1e19b312093c32855b
refs/heads/master
2022-12-29T13:20:34.469470
2020-10-14T05:23:46
2020-10-14T05:23:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,594
py
import time import json import zlib import requests import scrapy import pymongo from bs4 import BeautifulSoup from pigat.items import PigatItem_cms class pigat_ip(scrapy.Spider): name = 'pigat_cms' def start_requests(self): headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0'} ip_headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' } url = self.url # 待爬取 URL client = pymongo.MongoClient('localhost', 27017) # 连接数据库 collection = client['pigat']['pigat_subdomain'] # 读取数据 if list(collection.find({'url': url})) == []: # 判断数据是否为空 print( '\n\033[1;31m[{}] 数据库中未查询到 {} 的子域信息,无法进行 {} 的 CMS 信息,请先获取 {} 的子域信息\n\033[0m'.format(time.strftime('%Y-%m-%d %H:%M:%S'), url, url, url)) else: print('\n\033[1;33m[{}] 正在被动收集 {} 的子域 CMS 信息……\033[0m'.format(time.strftime('%Y-%m-%d %H:%M:%S'), url)) for i in collection.find({'url': url}): subdomain_url = i['subdomain_url'] # 子域cms查询 if 'http' not in subdomain_url: sub_url1 = 'http://' + subdomain_url yield scrapy.Request(sub_url1, headers=headers, meta={'url': url, 'sub_url': sub_url1}, callback=self.sub_cms) sub_url2 = 'https://' + subdomain_url yield scrapy.Request(sub_url2, headers=headers, meta={'url': url, 'sub_url': sub_url2}, callback=self.sub_cms) else: yield scrapy.Request(subdomain_url, headers=headers, meta={'url': url, 'sub_url': subdomain_url}, callback=self.sub_cms) def sub_cms(self, response): url = response.meta['url'] subdomain_url = response.meta['sub_url'] whatweb_dict = {"url": response.url, "text": response.text, "headers": dict(self.convert(response.headers))} whatweb_dict = json.dumps(whatweb_dict) whatweb_dict = whatweb_dict.encode() whatweb_dict = zlib.compress(whatweb_dict) data = {"info": whatweb_dict} cms_response2 = requests.post(url="http://whatweb.bugscaner.com/api.go", files=data) cms_json = json.loads(cms_response2.text) cms_soup = 
BeautifulSoup(response.text, 'html.parser') try: cms_title = cms_soup.title.text except: cms_title = '' pass try: cms_CMS = cms_json['CMS'][0] except: cms_CMS = '' pass try: cms_Font_Scripts = cms_json['Font Scripts'][0] except: cms_Font_Scripts = '' pass try: cms_JavaScript_Frameworks = cms_json['JavaScript Frameworks'][0] except: cms_JavaScript_Frameworks = '' pass try: cms_JavaScript_Libraries = cms_json['JavaScript Libraries'][0] except: cms_JavaScript_Libraries = '' pass try: cms_Miscellaneous = cms_json['Miscellaneous'][0] except: cms_Miscellaneous = '' pass try: cms_Operating_Systems = cms_json['Operating Systems'][0] except: cms_Operating_Systems = '' pass try: cms_Photo_Galleries = cms_json['Photo Galleries'][0] except: cms_Photo_Galleries = '' pass try: cms_Programming_Languages = cms_json['Programming Languages'][0] except: cms_Programming_Languages = '' pass try: cms_Web_Frameworks = cms_json['Web_Frameworks'][0] except: cms_Web_Frameworks = '' pass try: cms_Web_Servers = cms_json['Web Servers'][0] except: cms_Web_Servers = '' pass try: cms_Widgets = cms_json['Widgets'][0] except: cms_Widgets = '' pass try: cms_error = cms_json['error'][0] except: cms_error = '' pass try: cms_Waf = cms_json['Waf'][0] except: cms_Waf = '' pass try: cms_CDN = cms_json['CDN'][0] except: cms_CDN = '' pass try: cms_Marketing_Automation = cms_json['Marketing Automation'][0] except: cms_Marketing_Automation = '' pass item = PigatItem_cms( url=url, subdomain_url=subdomain_url, cms_title=cms_title, cms_CMS=cms_CMS, cms_Font_Scripts=cms_Font_Scripts, cms_JavaScript_Frameworks=cms_JavaScript_Frameworks, cms_JavaScript_Libraries=cms_JavaScript_Libraries, cms_Miscellaneous=cms_Miscellaneous, cms_Operating_Systems=cms_Operating_Systems, cms_Photo_Galleries=cms_Photo_Galleries, cms_Programming_Languages=cms_Programming_Languages, cms_Web_Frameworks=cms_Web_Frameworks, cms_Web_Servers=cms_Web_Servers, cms_Widgets=cms_Widgets, cms_error=cms_error, cms_Waf=cms_Waf, cms_CDN=cms_CDN, 
cms_Marketing_Automation=cms_Marketing_Automation ) yield item cms_info = '' for i in cms_json: cms_info = cms_info + '\t' + cms_json[i][0] print('\033[1;32m[{}] {}\t{}\t{}\t{}\033[0m'.format(time.strftime('%Y-%m-%d %H:%M:%S'), url, subdomain_url, cms_title, cms_info)) if cms_response2.headers['X-RateLimit-Remaining'] == '今日识别 cms 剩余次数:0': print('\033[1;33m[{}] 每天有 1500 次免费识别次数,今日剩余次数已为 0,挂代理可继续使用\n\033[0m'.format( time.strftime('%Y-%m-%d %H:%M:%S'))) def convert(self, data): if isinstance(data, bytes): return data.decode('ascii') if isinstance(data, list): return data.pop().decode('ascii') if isinstance(data, dict): return dict(map(self.convert, data.items())) if isinstance(data, tuple): return map(self.convert, data) return data
[ "1205613989@qq.com" ]
1205613989@qq.com
b79f48e5eac894729b2be9ba8b6dd039868089e1
97c3ee99b694e2bae1d84d13d2311bc4430f362d
/firmware/rm5.py
90f51a071b63a1e0dfb3ee22f8b749c41a674c50
[]
no_license
Technonautes/technotireuse
2d9df1ae4aa0b42143ea4c541e6792e4ee436d38
3ac7edf5837842347d26b7f72a47ce5f128ef962
refs/heads/master
2020-04-20T17:11:17.117830
2013-02-24T00:04:13
2013-02-24T00:04:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
123
py
from serial import Serial s = Serial('/dev/ttyUSB0') while True: s.write(chr(0) + chr(29) + "\n") print s.read(4)
[ "gabe@squirrelsoup.net" ]
gabe@squirrelsoup.net
6a3525a5768ae1cca9acf1b58bac76069208d3a9
1de26609eb6a84da6e431425ea76241dc54a270b
/旧的版本/0.76/o2oPrinter.py
46de58383f53d7025a103c411706110d78c215b4
[]
no_license
ZUXING/o2oPrinter
de3e56a2a6cdd7632f113f42a5aff6b7f7606f2c
269a46e6188037c067529129ba5f2b1ca64d7d98
refs/heads/master
2022-12-23T06:41:17.348453
2020-09-19T15:08:09
2020-09-19T15:08:09
279,644,467
0
0
null
null
null
null
UTF-8
Python
false
false
808
py
import os import win32api import win32event import win32process import win32con import win32print #Version 0.76 #ChenGuanglin Software Studio & ZUXING print("Codename:o2oPrinter") print("在线打印机开始工作") from win32com.shell.shell import ShellExecuteEx from win32com.shell import shellcon recvFilePath = 'o2oPrint\\' while 1: dirs = os.listdir(recvFilePath) for file in dirs: if os.path.splitext(file)[1] == ".docx": print(file) process_info = ShellExecuteEx(nShow=win32con.SW_SHOW, fMask=shellcon.SEE_MASK_NOCLOSEPROCESS, lpVerb='print', lpFile=recvFilePath + file, lpParameters='/d:"%s"' % win32print.GetDefaultPrinter ()) win32event.WaitForSingleObject(process_info['hProcess'], -1) print("打印完成") os.remove(recvFilePath + file) os.system("pause")
[ "zuxingv@foxmail.com" ]
zuxingv@foxmail.com
b2bfe60fd2757ea1c81ebdbf993288df5d87aa51
b98e5b36afde16824ff7f24394a13db33498ab13
/Ensaio_RAW.py
19770e03d752ae6a95b7447be944fad7cdd0b333
[]
no_license
GuilhermeMPPython/Calculab.py
2ed15c675c72c71fe734a83e8cac0c3fe1649ad2
f76fff1f205177f81baa054d3b03c443b870dfdf
refs/heads/master
2020-07-01T07:22:04.177569
2019-08-09T14:53:59
2019-08-09T14:53:59
201,088,157
0
0
null
null
null
null
UTF-8
Python
false
false
931
py
# -*- coding: utf-8 -*- """ Created on Wed Aug 7 13:55:44 2019 @author: Guilherme MP """ import celula_p import leishmania_p import virgula def ensaio_infeccao_RAW(): print (' ','='*38) print('\t CÁLCULO LEISHMANIA') print(' ','='*38) leish_poco = round(leishmania_p.leishmania(),2) print (' ','='*38) print('\tCÁLCULO CÉLULAS RAW') print(' ','='*38) cel_poco = round (celula_p.celula(),2) quant_meio = 180 - (leish_poco + cel_poco) print() print (' ','='*38) print('\tENSAIO DE INFECÇÃO RAW') print(' ','='*38) p = int(input(f''' N° de poços: ''')) print(f' -> Quant de leish + cel/poço: {virgula.virgula(round((leish_poco + cel_poco),2))} uL') print (f' -> Total de leishmania: {virgula.virgula(leish_poco*4)} uL') print (f' -> Total de celula: {virgula.virgula(cel_poco*4)} uL') print (f' -> Total de meio: {virgula.virgula(quant_meio*4)} uL')
[ "53872837+GuilhermeMPPython@users.noreply.github.com" ]
53872837+GuilhermeMPPython@users.noreply.github.com
195624fb20c54ced15a65be4c1af7cb329cc3b1c
31f9333012fd7dad7b8b12c1568f59f33420b0a5
/Alessandria/env/lib/python3.8/site-packages/django/contrib/staticfiles/testing.py
754bd296574e9e20066c857e41043e1bb11bfcc3
[]
no_license
jcmloiacono/Django
0c69131fae569ef8cb72b135ab81c8e957d2a640
20b9a4a1b655ae4b8ff2a66d50314ed9732b5110
refs/heads/master
2022-11-15T22:18:57.610642
2020-07-14T14:43:16
2020-07-14T14:43:16
255,125,001
0
0
null
null
null
null
UTF-8
Python
false
false
464
py
from django.contrib.staticfiles.handlers import StaticFilesHandler from django.test import LiveServerTestCase class StaticLiveServerTestCase(LiveServerTestCase): """ Extend django.test.LiveServerTestCase to transparently overlay at test execution-time the assets provided by the staticfiles app2 finders. This means you don't need to run collectstatic before or as a part of your tests setup. """ static_handler = StaticFilesHandler
[ "jcmloiacono@gmail.com" ]
jcmloiacono@gmail.com
c4e5fb6ae2eebc51f64b8a0e86de608c5e28fba1
c3cfcf8cd82626cb66019499471c7532b8559026
/KNN/KNN.py
4e13b46a68ac605c00ab5e9d2ec8105bfeeb9e2d
[]
no_license
shyheng/Machin_Learning
de7ee6d0e8f50789a7f065a2f0eb0fd7e4130a80
03d92c9935f2366bf571068cd133fe3734f5c183
refs/heads/main
2023-01-24T12:23:59.224756
2020-11-21T16:30:52
2020-11-21T16:30:52
314,854,402
0
0
null
null
null
null
UTF-8
Python
false
false
1,405
py
import csv import random #读取 with open('Prostate_Cancer.csv','r') as file: reader = csv.DictReader(file) datas=[row for row in reader] # 分组 random.shuffle(datas) n=len(datas)//3 text_set=datas[0:n] train_set=datas[n:] # print(text_set) # KNN # 距离 def distance(d1,d2): res=0 for key in ("radius","texture","perimeter","area","smoothness","compactness","symmetry","fractal_dimension"): res+=(float(d1[key])-float(d2[key]))**2 return res**0.5 K = 10 # 四种方法固定不变 def knn(data): # 1距离 res = [ {"结果":train["diagnosis_result"],"距离":distance(data,train)} for train in train_set ] # print(res) # 2排序-升序 res = sorted(res,key=lambda item:item['距离']) # 3取前K个 res2 = res[0:K] # 4加权平均 result={'B':0,'M':0} # 总距离 sum=0 for r in res2: sum+=r['距离'] for r in res2: result[r['结果']]+=1-r['距离']/sum print(result) print(data['diagnosis_result']) if result['B']>result['M']: return 'B' else: return 'M' knn(text_set[0]) # 测试阶段 correct = 0 for text in text_set: result = text['diagnosis_result'] result2 = knn(text) if result == result2: correct+=1 print(correct) print(len(text_set)) print("准确率:{:.2f}%".format(100*correct/len(text_set)))
[ "shy2210278285@aliyun.com" ]
shy2210278285@aliyun.com
e02f4a0c5b78cca43171902e5b8212d0c9bf443a
2fe18f4babd857381c2251f1c2437ccdae234dd8
/bookmarks/bookmarks/settings.py
273af7e947825b97a57cf7f7558397f12874a3f2
[]
no_license
Akhtyrtsev/bookmarks
62f23d87c9442aaa2f56c73dd52ddbf8e456f7e1
c8c52f1a9d4674a7187ad2408af7c090424a9738
refs/heads/master
2020-07-03T23:17:44.547699
2019-08-15T12:27:04
2019-08-15T12:27:04
202,083,635
0
0
null
null
null
null
UTF-8
Python
false
false
3,627
py
""" Django settings for bookmarks project. Generated by 'django-admin startproject' using Django 2.2.4. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '71wl&ele@0v_^508xm(cy)z!%6is^_sb1k_k4b$2=1gzupra-r' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'account.apps.AccountConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'bookmarks.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'bookmarks.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 
'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] LOGIN_REDIRECT_URL = 'dashboard' LOGIN_URL = 'login' LOGOUT_URL = 'logout' # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True AUTHENTICATION_BACKENDS = [ 'django.contrib.auth.backends.ModelBackend', 'account.authentication.EmailAuthBackend', ] # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), ) MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media/') EMAIL_HOST = 'smtp.gmail.com' EMAIL_HOST_USER = 'mytestmail842@gmail.com' EMAIL_HOST_PASSWORD = 'mytestmail842mytestmail842' EMAIL_PORT = 587 EMAIL_USE_TLS = True
[ "akhtyrtsev@gmail.com" ]
akhtyrtsev@gmail.com
3038e558ef979265c039fb7fa0e2bd16093e2d0d
2bda7591991e5be48f36fb22d562cc843d03ba73
/src/main.py
b63f314209ff76f19897c5e34ac3e29a47039d39
[]
no_license
julflore000/Thermo-Final-Project
b95857a1d35480f934fb5747d7fa8fe4f57eb852
8930a4126e6cc7e9348a55af8bba3904116eae23
refs/heads/main
2023-02-02T19:05:58.586166
2020-12-12T00:47:25
2020-12-12T00:47:25
316,055,706
0
0
null
null
null
null
UTF-8
Python
false
false
6,467
py
import pandas as pd import numpy as np import csv import matplotlib.pyplot as plt import math import datetime import seaborn as sns def calculateEnergyGenerated(startDate,endData,keepRes = False): """ generates the ROR electricity plant for a start date to an end date start and end dates: must be in datetime formats keepRes: if false, returns a single value for how much energy was generated (joules) for time span if true, returns the energy generated at each 15 minute resolution (Watts) for time span """ dischargeData = df[df.Date.between(startDate, endData)]['Mass Flow (ft^3/s)'] #reading in data dischargeData = np.array(dischargeData) * footCubeToMeter#in m^3/s inputVelocityData = dischargeData / area #now in m/s inputVelocityData = np.where(inputVelocityData > 10, 10, inputVelocityData) #capping max velocity at 10 m/s massFlowData = dischargeData * density #now in kg/s workGenerated = efficiency * .5 * massFlowData * (inputVelocityData *inputVelocityData) #in joules / second or watts energyGenerated = workGenerated * timeGap #(converting to joules generated for each 15 minute period) if keepRes: return workGenerated #returning in watt format else: return (np.sum(energyGenerated)) #returning in watt-hours or in joules #setting constants footToMeter = 0.3048000 # m/ft footCubeToMeter = footToMeter ** 3 #m^3/ft^3 length = 150 * footToMeter #feet to meters depth = 18 * footToMeter #feet to meters area = math.pi *length * depth / 2 #in m^2 density = 997 # in kg/m^3 efficiency = .81 ########################################## #Data comes in from the'discharge_huron_river_data.csv' in 15 minute intervals from 2010-01-01 to 2019-12-31 ########################################## #reading in data df = pd.read_csv("Cleaned_Huron_River_Discharge_Data.csv",parse_dates = True) #converting to datetime fromat so we can index into a specific year and removing leap year day df["Date"] = pd.to_datetime(df["Date"], format='%Y-%m-%d %H:%M') df = df[~((df["Date"].dt.month == 2) & 
(df["Date"].dt.day == 29))] #defining constants and start and end years year = 2010 endYear = 2020 timeGap = 15 * 60 #15 minutes converted to seconds for each of the csv entries wattHourConversion = 60 * 60 #how many seconds are in 1 hour timesThru = 0 #defining total generation dataset, 5 rows for each year: format goes total generation then spring,summer,fall,winter totalGenerationDataset = np.zeros((5,endYear-year)) highResData = np.zeros((endYear-year,8760*4)) while year < 2020: #setting up dates for getting data from january 1st to december 31st start_date = pd.to_datetime('01/01/%s' % (year)) end_date = pd.to_datetime('12/31/%s 23:59' % (year)) #spring season date range springStart = pd.to_datetime('03/20/%s 00:00' % (year)) springEnd = pd.to_datetime('06/19/%s 23:59' % (year)) #summer season date range summerStart = pd.to_datetime('06/20/%s 00:00' % (year)) summerEnd = pd.to_datetime('09/21/%s 23:59' % (year)) #fall season date range fallStart = pd.to_datetime('09/22/%s 00:00' % (year)) fallEnd = pd.to_datetime('12/20/%s 23:59' % (year)) #winter season date range winterStart = pd.to_datetime('12/21/%s 00:00' % (year)) winterEnd = pd.to_datetime('03/19/%s 23:59' % (year)) #looking at seasons over the years totalGenerationDataset[0][timesThru] = calculateEnergyGenerated(start_date,end_date) #full year data totalGenerationDataset[1][timesThru] = calculateEnergyGenerated(springStart,springEnd) #spring data totalGenerationDataset[2][timesThru] = calculateEnergyGenerated(summerStart,summerEnd) #summer data totalGenerationDataset[3][timesThru] = calculateEnergyGenerated(fallStart,fallEnd) #fall data #winter data, what we did was add winter from end of year and winter from start of year totalGenerationDataset[4][timesThru] = calculateEnergyGenerated(winterStart,end_date) #winter to end of year totalGenerationDataset[4][timesThru] += calculateEnergyGenerated(start_date,winterEnd) #adding on from january 1st to start of spring highResData[timesThru] = 
calculateEnergyGenerated(start_date,end_date,keepRes = True) /1000 #full year data converted to Kilowatts from Watts plt.plot(highResData[timesThru],label=year,alpha=.25) #converting to kW year += 1 timesThru += 1 #average value of total year ROR generation print("Average generation over years: %s" % (np.average(totalGenerationDataset[0][:]))) #this graph shows what the expected watt hour generation would be for the run of river plant for each 15 minute discharge period plt.title("Energy generation at each 15 minute increment for Huron ROR") plt.ylabel("Kilowatts") plt.xlabel("Date") plt.plot(np.mean(highResData,axis=0),label="Average of Years") plt.legend() # Get locations and labels locs, labels = plt.xticks() #custom location for labels in line with automatically generated tick locations plt.xticks(locs[1:9], ["01/01","02/22","04/15","06/06","07/28","09/18","11/09","12/31"]) plt.show() #barchart graph years = ('2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019') y_pos = np.arange(len(years)) #seasons combined together with no differentiation between seasons ''' plt.bar(y_pos, totalGenerationDataset[0], align='center',label = "Total Generation") plt.xticks(y_pos, years) plt.ylabel('Total Energy Generated (Joules)') plt.title('Total Energy Generated for each Year')# of Run of River Power Plant at Huron River') plt.legend() totalGenerationDataset = totalGenerationDataset / 1000 #converting to kWh ''' totalGenerationDataset = totalGenerationDataset / 1000 #converting watt hours to kWh #stacking bar chart values plt.bar(y_pos, totalGenerationDataset[1], align='center',label = "Spring") plt.bar(y_pos, totalGenerationDataset[2], align='center',label = "Summer",bottom=totalGenerationDataset[1]) plt.bar(y_pos, totalGenerationDataset[3], align='center',label = "Fall",bottom=(totalGenerationDataset[1]+totalGenerationDataset[2])) plt.bar(y_pos, totalGenerationDataset[4], 
align='center',label="Winter",bottom=(totalGenerationDataset[1]+totalGenerationDataset[2]+totalGenerationDataset[3])) #plotting bar chart plt.xticks(y_pos, years) plt.ylabel('Energy Generated (kWh)') plt.title('Interannual Variation of Run-of-River Plant')# of Run of River Power Plant at Huron River') plt.legend() plt.show()
[ "52216279+julflore000@users.noreply.github.com" ]
52216279+julflore000@users.noreply.github.com
93997a040bba25b98298ab1d27859cb7599ddd00
911790a1b0b9cdc0e2348e9cd849a68e9be67cab
/video_frame.py
1a22f6848953516cfa2bb7d2aa04aff0a866fc9a
[ "Apache-2.0" ]
permissive
ANDROID564/pc_cyber_lab
9dbff546e049b2e6ed5feb5f2c7aea18d0e8b322
704aa94ec74f78d9221f4c0f96cfa62a9e10d3ef
refs/heads/master
2020-05-25T04:03:23.566675
2019-05-20T11:41:12
2019-05-20T11:41:12
187,619,159
0
0
null
null
null
null
UTF-8
Python
false
false
1,183
py
''' Using OpenCV takes a mp4 video and produces a number of images. Requirements ---- You require OpenCV 3.2 to be installed. Run ---- Open the main.py and edit the path to the video. Then run: $ python main.py Which will produce a folder called data with the images. There will be 2000+ images for example.mp4. ''' import cv2 import numpy as np import os # Playing video from file: cap = cv2.VideoCapture('2.avi') try: if not os.path.exists('data1'): os.makedirs('data1') except OSError: print ('Error: Creating directory of data') currentFrame = 0 while(True): # Capture frame-by-frame ret, frame = cap.read() if ret: # Saves image of the current frame in jpg file name = './data1/frame' + str(currentFrame) + '.jpg' print ('Creating...' + name) cv2.imwrite(name, frame) # To stop duplicate images currentFrame += 1 cv2.imshow('Video', frame) if cv2.waitKey(100) & 0xFF == ord('q'): break else: break # When everything done, release the capture cap.release() cv2.destroyAllWindows()
[ "noreply@github.com" ]
noreply@github.com
b7a39781da795ef517cb03964f9dde5fd7e01f70
376a1b1be24210e8860662f3d499124ab25b30a7
/src/Semestre_2019_2/2019_09_17_Underfitting_Overfitting.py
d51a59c9e3e524c6cafd95a9704fd9e2225451fe
[]
no_license
nocyamribeiro/EEL891
0aabcc456374c9b9005251d0342460c3eea1f5db
2e265d71a2fba661843662047a83837e4aecf10d
refs/heads/master
2021-01-06T05:35:57.075806
2019-11-19T03:42:16
2019-11-19T03:42:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,131
py
#============================================================================== # Underfitting e Overfitting #============================================================================== polynomial_degree = 3 # grau do polinomio usado no modelo number_of_samples = 20 # numero de amostras de dados disponiveis #------------------------------------------------------------------------------ # Definir a função real (sem ruido) de onde vieram as amostras # (nao utilizada pelo regressor, usada somente para visualizacao grafica) #------------------------------------------------------------------------------ import numpy as np X_grid = np.linspace(0, 1.00, 101).reshape(-1,1) y_grid = np.sin(2 * np.pi * X_grid) #------------------------------------------------------------------------------ # Gerar um conjunto de amostras com ruido gaussiano em torno da funcao #------------------------------------------------------------------------------ np.random.seed(seed=0) X_rand = np.random.rand(number_of_samples,1) y_rand = np.sin(2 * np.pi * X_rand) + 0.20 * np.random.randn(number_of_samples,1) #------------------------------------------------------------------------------ # Dividir o conjunto de dados em conjunto de treinamento e conjunto de teste #------------------------------------------------------------------------------ from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X_rand, y_rand, test_size = 0.5 #, random_state = 352019 ) #------------------------------------------------------------------------------ # Visualizar as amostras em um diagrama de dispersao #------------------------------------------------------------------------------ import matplotlib.pyplot as plt plt.figure(figsize=(8,8)) plt.title('Amostras Disponiveis') plt.scatter ( X_train , y_train , color = 'red' , marker = 'o' , s = 30 , alpha = 0.5 , label = 'Amostras para Treinamento' ) plt.scatter ( X_test , y_test , color = 'green' , marker = 'o' , s = 30 
, alpha = 0.5 , label = 'Amostras para Teste' ) plt.plot ( X_grid , y_grid , color = 'grey' , linestyle='dotted' , label = 'Funcao alvo (desconhecida)' ) plt.legend() plt.xlabel('X') plt.ylabel('y') plt.ylim(-1.5,1.5) plt.show() #raise SystemExit() #------------------------------------------------------------------------------ # Treinar um regressor polinomial com o conjunto de treinamento #------------------------------------------------------------------------------ from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LinearRegression pf = PolynomialFeatures(polynomial_degree) lr = LinearRegression() X_train_poly = pf.fit_transform(X_train) lr = lr.fit(X_train_poly, y_train) #------------------------------------------------------------------------------ # Obter a resposta do modelo para o proprio conjunto de treinamento #------------------------------------------------------------------------------ y_train_pred = lr.predict(X_train_poly) #------------------------------------------------------------------------------ # Obter a resposta do modelo para o conjunto de teste #------------------------------------------------------------------------------ X_test_poly = pf.transform(X_test) y_test_pred = lr.predict(X_test_poly) #------------------------------------------------------------------------------ # Calcular o desempenho do modelo dentro e fora da amostra #------------------------------------------------------------------------------ import math from sklearn.metrics import mean_squared_error, r2_score RMSE_in = math.sqrt ( mean_squared_error ( y_train , y_train_pred ) ) RMSE_out = math.sqrt ( mean_squared_error ( y_test , y_test_pred ) ) R2_in = r2_score ( y_train , y_train_pred ) R2_out = r2_score ( y_test , y_test_pred ) #------------------------------------------------------------------------------ # Obter a resposta do modelo para o grid (para posteriormente plotar a curva) 
#------------------------------------------------------------------------------ X_grid_poly = pf.transform(X_grid) y_grid_pred = lr.predict(X_grid_poly) #------------------------------------------------------------------------------ # Visualizar a resposta do modelo dentro e fora da amostra #------------------------------------------------------------------------------ import matplotlib.pyplot as plt fig = plt.figure ( figsize=(14,6) ) pin = plt.subplot ( "121" ) plt.ylim ( -1.5 , 1.5 ) pout = plt.subplot ( "122" ) plt.ylim ( -1.5 , 1.5 ) # Quadro com resultado DENTRO da amostra (conjunto de treinamento) pin.title.set_text ( 'Aproximacao de grau '+ str(polynomial_degree) + '\nDesempenho DENTRO da amostra:' + '\n R2 = ' + str ( '%.4f' % R2_in ) + ' RMSE = ' + str ( '%.4f' % RMSE_in) ) pin.plot ( X_grid, y_grid , color = 'grey' , linestyle = 'dotted' , alpha = 0.5 , label='Funcao alvo (desconhecida)' ) pin.scatter ( X_train , y_train , color = 'red' , marker = 'o' , s = 30 , alpha = 0.5 , label = 'Conjunto de Treinamento' ) pin.scatter ( X_train , y_train_pred , color = 'blue' , marker = 'x' , s = 60 , alpha = 0.5 , label = 'Respostas do Modelo' ) pin.plot ( X_grid, y_grid_pred, color = 'blue' , linestyle = 'solid' , alpha = 0.25, label='Funcao correspondente ao modelo' ) # Quadro com resultado FORA da amostra (conjunto de teste) pout.title.set_text ( 'Aproximacao de grau '+ str(polynomial_degree) + '\nDesempenho FORA da amostra:' + '\n R = ' + str ( '%.4f' % R2_out ) + ' RMSE = ' + str ( '%.4f' % RMSE_out) ) pout.plot ( X_grid , y_grid , color = 'grey' , linestyle = 'dashed' , alpha = 0.5, label = 'Funcao alvo (desconhecida)' ) pout.scatter ( X_test , y_test , color = 'green' , marker = 'o' , s = 30 , alpha = 0.5 , label = 'Conjunto de Teste' ) pout.scatter ( X_test , y_test_pred , color = 'blue' , marker = 'x' , s = 60 , alpha = 0.5 , label = 'Respostas do Modelo' ) pout.plot ( X_grid , y_grid_pred , color = 'blue' , linestyle = 'solid' , alpha = 0.25, 
label='Funcao correspondente ao modelo' ) plt.show() #raise SystemExit() #------------------------------------------------------------------------------ # Verificar erro DENTRO e FORA da amostra em funcao do grau do polinomio #------------------------------------------------------------------------------ print('\nParametros do regressor:\n', np.append( lr.intercept_ , lr.coef_ ) ) #raise SystemExit() #------------------------------------------------------------------------------ # Exibir os coeficientes do polinomio #------------------------------------------------------------------------------ #print ( ' Grau Erro IN Erro OUT') #print ( ' ---- ------- --------') # #for degree in range(1,21): # # pf = PolynomialFeatures(degree) # lr = LinearRegression() # # X_train_poly = pf.fit_transform(X_train) # lr = lr.fit(X_train_poly, y_train) # # y_train_pred = lr.predict(X_train_poly) # # X_test_poly = pf.transform(X_test) # y_test_pred = lr.predict(X_test_poly) # # RMSE_in = math.sqrt ( mean_squared_error ( y_train , y_train_pred ) ) # RMSE_out = math.sqrt ( mean_squared_error ( y_test , y_test_pred ) ) # # print ( str ( ' %2d' % degree ) + ' ' + # str ( '%10.4f' % RMSE_in ) + ' ' + # str ( '%10.4f' % RMSE_out ) # )
[ "heraldo.ufrj@gmail.com" ]
heraldo.ufrj@gmail.com
440d156989c7d14212ee7acec2a615fa1d0d34cc
f75f9c0e7192170a5846c0b726b10e645d5812b7
/tests/test_models.py
845a6eaf73b1e3765e21211184bc835c50c73de7
[ "MIT" ]
permissive
mzbotr/betfair.py
6feff7250fec38c31ef9c89fc15a057c935d7274
dca804a4eaf999af54c53589e9559409fae26d6f
refs/heads/master
2021-01-21T06:02:35.902807
2015-06-15T04:05:51
2015-06-15T04:05:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,563
py
# -*- coding: utf-8 -*- import pytest from enum import Enum from schematics.types import StringType from betfair.meta.types import EnumType from betfair.meta.types import ModelType from betfair.meta.models import BetfairModel def test_field_inflection(): class FakeModel(BetfairModel): underscore_separated_field = StringType() record = FakeModel(underscoreSeparatedField='test') assert record.underscore_separated_field == 'test' serialized = record.serialize() assert 'underscoreSeparatedField' in serialized assert serialized['underscoreSeparatedField'] == 'test' FakeEnum = Enum( 'TestEnum', [ 'val1', 'val2', ] ) @pytest.mark.parametrize(['input', 'expected'], [ ('val1', 'val1'), (FakeEnum.val1, 'val1'), ]) def test_enum_type(input, expected): class FakeModel(BetfairModel): enum_field = EnumType(FakeEnum) datum = FakeModel(enum_field=input) datum.validate() serialized = datum.serialize() assert serialized['enumField'] == expected class Child(BetfairModel): child_name = StringType() class Parent(BetfairModel): parent_name = StringType() child = ModelType(Child) def test_nested_model(): parent = Parent(parent_name='mom', child=dict(child_name='kid')) expected = { 'parentName': 'mom', 'child': { 'childName': 'kid', }, } assert parent.serialize() == expected def test_nested_model_unserialize_rogue(): Parent(parent_name='dad', child=dict(child_name='kid', rogue='rogue'))
[ "jm.carp@gmail.com" ]
jm.carp@gmail.com
d4275b4c2da2fb0801bf73f1f333a1a8ad719ed9
6bd9d9366351cf2aa57a4fd23d0ed6a56b5270c7
/Day6Noran.py
7c4964125902231ac5a73cf92f9c5ccfcd4ad267
[]
no_license
NoranAB/100daysofcodes
671f6bfaf5c657f3d779843b5d05a33aa1bf28e0
c716563f7388ff7cfd2044a0f6097178bc08705b
refs/heads/master
2020-07-10T10:19:41.417762
2019-11-21T05:19:23
2019-11-21T05:19:23
204,239,718
0
0
null
null
null
null
UTF-8
Python
false
false
426
py
#Example1 x = int(2) #x will be 1 y = int(3.6) # y will be 2 z = int("4") # z will be 3 print (x) print(y) print(z) #Example2 x = float(2) # x will be 1.0 y = float (3.6) # y will be 3.6 z = float ("4") # z will be 4.0 n = float ("4.6") # n will be 4.6 print (x) print(y) print(z) print(n) #Example3 i = str ("sl") # i will be 'sl' j = str (4) # j will be '4' z = str (6.0) # z will be '6.0' print (i) print (j) print (z)
[ "noranoz41@gmail.com" ]
noranoz41@gmail.com
da2379011ed33e44d8e763b226628d29407fa4b1
8111f7c7118c04357ff94678f3ec0fa0506337be
/Mysite/static/data/script/mysql_test.py
0c4fc75e406f1f44e389708b13ae9b5d3a19034c
[]
no_license
Riners/Data-Visualization
3b7ba0ef0d4aad8d5e97d18d41ebb3b62a65d6d8
3c308d06840c8a205afd7b25f186efffe9584403
refs/heads/master
2020-03-21T11:12:54.584843
2018-06-24T16:50:48
2018-06-24T16:50:48
138,494,436
0
0
null
null
null
null
UTF-8
Python
false
false
2,137
py
import pymysql pymysql.install_as_MySQLdb() db = pymysql.connect("193.112.0.249", "root", "Admin@123", "mysite", charset='utf8' ) cursor = db.cursor() cursor.execute("SELECT VERSION()") # data = cursor.fetchone() # city = ['BJ','CD','GZ','HZ','GL','HF','LZ','NC','NJ','SH','SJZ','SZ','TJ','TY','WH','WLMQ','YN','ZZ'] # # print(len(city)) # # import json # ret_you = 0 # ret_liang = 0 # ret_QD = 0 # ret_ZD = 0 # # # for i in city: # sql = 'select Quality from %s' % i # cursor.execute(sql) # res = cursor.fetchall() # res = list(res) # for i in list(res): # if i[0] == '优': # ret_you += 1 # elif i[0] == '良': # ret_liang += 1 # elif i[0] == '轻度污染': # ret_QD += 1 # else: # ret_ZD += 1 # # data = [{'value':ret_you,'name':'优'},{'value':ret_liang,'name':'良'},{'value':ret_QD,'name':'轻度污染'},{'value':ret_ZD,'name':'重度污染'}] # # print(ret_you,ret_liang,ret_QD,ret_ZD) # print(data) city = ["北京","天津","河北","山西","内蒙古","辽宁","吉林","黑龙江","上海","江苏","浙江","安徽","福建","江西","山东","河南","湖北","湖南","重庆","四川","贵州","云南","西藏","陕西","甘肃","青海","宁夏","新疆","广东","广西","海南"] l = [64, 50, 47, 45, 41, 35, 41, 40, 47, 67, 53, 50, 51, 30, 40, 38, 40, 30, 40, 40, 38, 20, 31, 58, 51, 59, 59, 63, 87, 83, 88] AQI = [] cursor.execute('select AQI from all_city_airdata;') res = cursor.fetchall() for i in list(res): AQI.append(list(i)[0]) # print(AQI) data = [] # def bb(n): # dic = {} # # dic['name'] = city[n] # dic['value'] = AQI[n] # data.append(dic) # # for n in range(len(city)): # bb(n) PM2_5 = [] cursor.execute('select PM2_5 from all_city_airdata;') res = cursor.fetchall() for i in list(res): PM2_5.append(list(i)[0]) print(PM2_5) PM10 = [] cursor.execute('select PM10 from all_city_airdata;') res = cursor.fetchall() for i in list(res): PM10.append(list(i)[0]) print(PM10)
[ "920113448@qq.com" ]
920113448@qq.com
fc9b8add7d7714e6e622faf7435163e9b948f206
e04455817ed6a9497cb8b661a6d5780f211e4dd7
/meetup_app/app/scheduler.py
3aa6acbfbab76a55afdf75eda3a98ae44ec459cd
[]
no_license
smkukorea/meetup
942dba2f3e302d3dbdc666acada365a88fe788d7
c5bafd8e66484a6d2f371a91d0f4c88ec55ecc29
refs/heads/master
2022-12-13T12:10:51.856093
2020-01-15T21:49:46
2020-01-15T21:49:46
233,226,539
0
0
null
2022-12-08T07:02:47
2020-01-11T12:14:04
HTML
UTF-8
Python
false
false
3,700
py
from app.models import Event from datetime import datetime, timedelta class Schedule: def __init__(self, event): dates = event.dates.split(",") self.days = [datetime.strftime(date, "%a") for date in (datetime.strptime(date, "%m/%d/%Y") for date in dates)] self.dates = [datetime.strftime(date, "%m/%d") for date in (datetime.strptime(date, "%m/%d/%Y") for date in dates)] start_time_parsed = datetime.strptime(event.start, "%I:%M %p") end_time_parsed = datetime.strptime(event.end, "%I:%M %p") self.times = [datetime.strftime(date, "%I:%M %p") for date in datetime_range(start_time_parsed, end_time_parsed, timedelta(minutes=15))] def personal_to_event(user_schedule, event): dates = event.dates.split(",") times = [datetime.strftime(date, "%I:%M %p") for date in datetime_range(datetime.strptime(event.start, "%I:%M %p"), datetime.strptime(event.end, "%I:%M %p"), timedelta(minutes=15))] user_availability = {} short_dates = [datetime.strftime(date, "%m/%d") for date in (datetime.strptime(date, "%m/%d/%Y") for date in dates)] days_of_week = [datetime.strftime(date, "%A") for date in (datetime.strptime(date, "%m/%d/%Y") for date in dates)] for i in range(len(short_dates)): for time in times: user_availability[short_dates[i] + " " + time] = user_schedule[days_of_week[i] + " " + time] return user_availability def create_overlap(schedule, user_avail): id_list = [] for time in schedule.times: for date in schedule.dates: id_list.append(date + " " + time) overall_avail = {} avail_max = 0 for id1 in id_list: overall_avail[id1] = 0 for availability in user_avail.values(): for id1 in id_list: if availability[id1]: overall_avail[id1] += 1 if overall_avail[id1] > avail_max: avail_max = overall_avail[id1] colors = linear_gradient("#f0f0f0", "#5f7eed", avail_max+1) color_dict = {} for id1 in id_list: color_dict[id1] = colors[overall_avail[id1]] return color_dict def datetime_range(start, end, delta): current = start while current < end: yield current current += delta #source for color 
interpolation functions: https://bsou.io/posts/color-gradients-with-python def hex_to_RGB(hex): ''' "#FFFFFF" -> [255,255,255] ''' # Pass 16 to the integer function for change of base return [int(hex[i:i+2], 16) for i in range(1,6,2)] def RGB_to_hex(RGB): ''' [255,255,255] -> "#FFFFFF" ''' # Components need to be integers for hex to make sense RGB = [int(x) for x in RGB] return "#"+"".join(["0{0:x}".format(v) if v < 16 else "{0:x}".format(v) for v in RGB]) def linear_gradient(start_hex, finish_hex, n): ''' returns a gradient list of (n) colors between two hex colors. start_hex and finish_hex should be the full six-digit color string, inlcuding the number sign ("#FFFFFF") ''' # Starting and ending colors in RGB form s = hex_to_RGB(start_hex) f = hex_to_RGB(finish_hex) # Initilize a list of the output colors with the starting color RGB_list = [start_hex] # Calcuate a color at each evenly spaced value of t from 1 to n for t in range(1, n): # Interpolate RGB vector for color at the current value of t curr_vector = [ int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3) ] # Add it to our list of output colors RGB_list.append(RGB_to_hex(curr_vector)) return RGB_list
[ "smkukorea@gmail.com" ]
smkukorea@gmail.com
61511f49964ca71e6a0f6d8c8c5023828b810084
55909fd5282ea210f2221fc467f71f9ed41b0bef
/Aula 13/ex056.py
5a65082bf9d2352ec7ab655f7557494215f5ccf6
[ "MIT" ]
permissive
alaanlimaa/Python_CVM1-2-3
163ecd8c9145f2d332e6574d8923373b87a2e1f5
6d9a9bd693580fd1679a1d0b23afd26841b962a6
refs/heads/main
2023-06-18T16:07:59.930804
2021-07-20T16:22:01
2021-07-20T16:22:01
387,841,441
0
0
null
null
null
null
UTF-8
Python
false
false
671
py
midade = Hmaior = nomevelho = contM20 = 0 for p in range(1, 5): print('-=-' * 10) print(f'{p}º pessoa ') nome = str(input('Nome: ')).strip() idade = int(input('Idade: ')) sexo = str(input('Sexo [F/M]: ')).strip()[0] midade += idade if p == 1 and sexo in 'Mm': Hmaior = idade nomevelho = nome if sexo in 'Mm' and idade > Hmaior: Hmaior = idade nomevelho = nome if sexo in 'Ff' and idade < 20: contM20 += 1 print(f'A média de idade do grupo é {midade / p:.2f} anos') print(f'O homem mais velho tem {Hmaior} anos eo seu nome é {nomevelho}') print(f'São {contM20} mulheres menores de 20 anos')
[ "alanlimabusiness@outlook.com" ]
alanlimabusiness@outlook.com
f86e5caae131e7fb0503f683c8c5ebdba4847ccf
2c40670b2f745eeac43c6f0b2c60581c36a30302
/models/product.py
4c0758320d0bc7da138e82cbd2aa89399b924b71
[]
no_license
minsiang97/aroparts
a193beff24677c4ae43614e04d1b997d99014fca
f504017aa0cd2587a50dee1c4eaae39a6c46f515
refs/heads/master
2023-02-20T21:34:35.634658
2021-01-26T06:36:11
2021-01-26T06:36:11
321,933,007
0
0
null
null
null
null
UTF-8
Python
false
false
818
py
from models.base_model import BaseModel
# NOTE(review): Category and SubCategory are imported but not referenced in
# this file — presumably kept for model-registration side effects; confirm
# before removing.
from models.category import Category
from models.sub_category import SubCategory
import peewee as pw


class Product(BaseModel):
    """Peewee ORM model for a catalogue product.

    A product has an English and an optional Chinese name, up to ten
    free-text description lines, a price (stored as text) and an optional
    image path.
    """

    name=pw.CharField(unique=False, null=False)
    chinese_name = pw.CharField(unique=False, null=True)
    # Ten fixed description slots rather than a related table.
    description_line_1=pw.TextField(null=True)
    description_line_2=pw.TextField(null=True)
    description_line_3=pw.TextField(null=True)
    description_line_4=pw.TextField(null=True)
    description_line_5=pw.TextField(null=True)
    description_line_6=pw.TextField(null=True)
    description_line_7=pw.TextField(null=True)
    description_line_8=pw.TextField(null=True)
    description_line_9=pw.TextField(null=True)
    description_line_10=pw.TextField(null=True)
    # NOTE(review): price is a CharField, not a DecimalField — numeric
    # comparisons/sorting will be lexicographic; confirm this is intended.
    price=pw.CharField(null=False)
    image_path=pw.CharField(null=True)
[ "ongminsiang@gmail.com" ]
ongminsiang@gmail.com
780d129680d02e195525b521a06e2740039f7066
34aee9d503a16b5645b7a0057f5713d862076552
/chap_4/c4_36_2.py
10877b09e0410681888dcb931b766996c6e2b3d4
[]
no_license
meshidenn/nlp100
05724788429325f0fe83997c175feeca070f2d7b
aa1543240595e84f7a125a80774d8b345308c1bc
refs/heads/master
2021-01-13T07:55:07.732976
2017-04-17T15:16:49
2017-04-17T15:16:49
71,709,977
0
0
null
null
null
null
UTF-8
Python
false
false
1,249
py
import c4_30_2 as p30
import csv
from collections import Counter


def mk_set(sentence):
    """Return the set of base forms (lemmas) appearing in *sentence*.

    *sentence* is a list of morpheme dicts with at least a 'base' key,
    as produced by c4_30_2.sub().
    """
    buf = []
    for line in sentence:
        buf.append(line['base'])
    wset = set(buf)
    return wset


def w_dict(wset, sentence):
    """Initialise a {word: 0} counting dict for every word in *wset*.

    BUG FIX: counts previously started at 1, which inflated every final
    frequency by one; they now start at 0.
    """
    wdict = {}
    for word in wset:
        wdict[word] = 0
    return wdict


def w_freq(wdict, sentence):
    """Return {word: frequency} for every word of *wdict* in *sentence*.

    BUG FIX: the original compared ``line['surface']`` against keys that
    mk_set collected from ``line['base']``, so any word whose surface form
    differs from its base form was undercounted.  Both sides now use the
    base form.  A single Counter pass also replaces the original
    O(words * tokens) nested loop.
    """
    counts = Counter(line['base'] for line in sentence)
    return {word: start + counts.get(word, 0) for word, start in wdict.items()}


def fsort(wfreq):
    """Return (word, count) pairs sorted by descending count."""
    swfreq = sorted(wfreq.items(), key=lambda x: x[1], reverse=True)
    return swfreq


def main():
    """Compute the sorted word-frequency list for the corpus."""
    sentence = p30.sub()
    wset = mk_set(sentence)
    wdict = w_dict(wset, sentence)
    wfreq = w_freq(wdict, sentence)
    swfreq = fsort(wfreq)
    return swfreq


if __name__ == '__main__':
    freq = main()
    print(type(freq))
    # One "word,count" line per entry.
    with open('neko.txt.freq', 'w') as file:
        for x in freq:
            print(x[0], x[1])
            file.write('{},{}\n'.format(x[0].rstrip('\n'), x[1]))
    print(freq)
[ "hiroki@Hiroki-no-MacBook-Air.local" ]
hiroki@Hiroki-no-MacBook-Air.local
91a3a764327f1035a70607756d25158415b24c6d
327238868408aef99fb4342a8349b9e52f926db2
/diya-stack.py
bd7dc0be8b1896110a99bdb7aa3dac0acb2c6aef
[]
no_license
veitsi/algomix
7f8142f5b1808988431fa68a426790af86e4cc9d
a349239ec6068d1c85289174b84d579d70c7540f
refs/heads/master
2016-08-08T23:25:13.906076
2016-02-26T12:14:07
2016-02-26T12:14:07
52,602,254
0
0
null
null
null
null
UTF-8
Python
false
false
1,128
py
class Stack:
    """Fixed-capacity integer stack backed by a pre-allocated list.

    Quirks preserved from the original implementation:
    * ``push`` accepts only ``int`` values (anything else returns ``None``),
    * overflow and underflow return ``None`` instead of raising,
    * a capacity below 2 aborts ``__init__`` early, leaving the instance
      without its internal attributes (``return`` from ``__init__`` cannot
      actually signal failure to the caller).
    """

    def __init__(self, size=10):
        if size < 2:
            return None  # NOTE(review): caller still gets an (unusable) instance
        self.size = size
        self.__items = [None] * size
        self.__top = -1

    def __str__(self):
        # Only the occupied slice is shown, not the pre-allocated tail.
        return str(self.__items[:self.__top + 1])

    def push(self, x):
        # Reject non-int values (bool included, since type() is exact).
        if type(x) is not int:
            return None
        if self.__top == self.size - 1:
            return None  # stack is full
        self.__top += 1
        self.__items[self.__top] = x
        return self  # enables call chaining

    def pop(self):
        if self.__top == -1:
            return None  # stack is empty
        value = self.__items[self.__top]
        self.__top -= 1
        return value


# Inline self-checks, executed at import time (as in the original file).
assert Stack() is not None
assert Stack(10) is not None
# assert Stack(1)==None
assert Stack().push(1) is not None
assert Stack().push(1).push(2) is not None

st1 = Stack().push(1).push(2)
assert str(st1) == "[1, 2]"
assert st1.pop() == 2
p2 = st1.pop()
print(p2)
assert p2 == 1
assert st1.pop() is None
assert st1.push(0x0A) is not None
assert st1.push(Stack()) is None

st1 = Stack(3)
assert st1.push(1).push(2).push(3).push(4) is None
[ "xtfkpi@gmail.com" ]
xtfkpi@gmail.com
8d635e9d142e94d2d614b952b1484d4895c00490
930481737a3dae878d4db0f6ead9e43c710ccb8a
/QAMedicalKG/answer_search.py
224afe8aebdf0c3643099cbe5a2634355f87bec5
[]
no_license
zhoujx4/Knowledge-Graph
9ccd544955d1e69ea7b748d31e9626d5a2928510
6759c380f368a915cf292fe90bf6d421e9a427a8
refs/heads/main
2023-04-16T21:53:40.759374
2021-04-21T10:41:09
2021-04-21T10:41:09
360,130,153
1
0
null
null
null
null
UTF-8
Python
false
false
6,221
py
from py2neo import Graph


class AnswerSearcher:
    """Run cypher queries against the medical knowledge graph in Neo4j and
    render the rows into human-readable answer strings."""

    def __init__(self):
        # Connection to the local Neo4j instance holding the medical KG.
        self.g = Graph("http://localhost:7474", username="neo4j", password="2012051171id")
        # Maximum number of distinct items listed in a single answer.
        self.num_limit = 20

    '''执行cypher查询,并返回相应结果'''
    # Execute every cypher query of every sql entry, collect the rows and
    # turn each question's rows into one formatted answer string.
    def search_main(self, sqls):
        final_answers = []
        for sql_ in sqls:
            question_type = sql_['question_type']
            queries = sql_['sql']
            answers = []
            for query in queries:
                # .data() yields a list of dicts keyed by the RETURN aliases
                # (e.g. 'm.name', 'n.name', 'r.name').
                ress = self.g.run(query).data()  ##
                answers += ress
            final_answer = self.answer_prettify(question_type, answers)
            if final_answer:
                final_answers.append(final_answer)
        return final_answers

    '''根据对应的qustion_type,调用相应的回复模板'''
    # Pick the reply template matching question_type.  Every branch follows
    # the same shape: collect one column from the rows, take the subject
    # from the first row, deduplicate, truncate to num_limit and format.
    # Dedup via set() makes the item order non-deterministic.
    def answer_prettify(self, question_type, answers):
        final_answer = []
        if not answers:
            return ''
        if question_type == 'disease_symptom':
            desc = [i['n.name'] for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0}的症状包括:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'symptom_disease':
            desc = [i['m.name'] for i in answers]
            subject = answers[0]['n.name']
            final_answer = '症状{0}可能染上的疾病有:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_cause':
            desc = [i['m.cause'] for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0}可能的成因有:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_prevent':
            desc = [i['m.prevent'] for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0}的预防措施包括:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_lasttime':
            desc = [i['m.cure_lasttime'] for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0}治疗可能持续的周期为:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_cureway':
            # m.cure_way is itself a list, so each row is joined first.
            desc = [';'.join(i['m.cure_way']) for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0}可以尝试如下治疗:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_cureprob':
            desc = [i['m.cured_prob'] for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0}治愈的概率为(仅供参考):{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_easyget':
            desc = [i['m.easy_get'] for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0}的易感人群包括:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_desc':
            desc = [i['m.desc'] for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0},熟悉一下:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_acompany':
            # Accompanying diseases may appear on either side of the
            # relation; the subject itself is filtered out.
            # NOTE(review): this branch reuses the "symptoms include"
            # template although it lists accompanying diseases — confirm
            # whether the wording is intentional.
            desc1 = [i['n.name'] for i in answers]
            desc2 = [i['m.name'] for i in answers]
            subject = answers[0]['m.name']
            desc = [i for i in desc1 + desc2 if i != subject]
            final_answer = '{0}的症状包括:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_not_food':
            desc = [i['n.name'] for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0}忌食的食物包括有:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_do_food':
            # Rows are split by relation name into "good to eat" and
            # "recommended recipes".
            do_desc = [i['n.name'] for i in answers if i['r.name'] == '宜吃']
            recommand_desc = [i['n.name'] for i in answers if i['r.name'] == '推荐食谱']
            subject = answers[0]['m.name']
            final_answer = '{0}宜食的食物包括有:{1}\n推荐食谱包括有:{2}'.format(subject, ';'.join(list(set(do_desc))[:self.num_limit]), ';'.join(list(set(recommand_desc))[:self.num_limit]))
        elif question_type == 'food_not_disease':
            desc = [i['m.name'] for i in answers]
            subject = answers[0]['n.name']
            final_answer = '患有{0}的人最好不要吃{1}'.format(';'.join(list(set(desc))[:self.num_limit]), subject)
        elif question_type == 'food_do_disease':
            desc = [i['m.name'] for i in answers]
            subject = answers[0]['n.name']
            final_answer = '患有{0}的人建议多试试{1}'.format(';'.join(list(set(desc))[:self.num_limit]), subject)
        elif question_type == 'disease_drug':
            desc = [i['n.name'] for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0}通常的使用的药品包括:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'drug_disease':
            desc = [i['m.name'] for i in answers]
            subject = answers[0]['n.name']
            final_answer = '{0}主治的疾病有{1},可以试试'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'disease_check':
            desc = [i['n.name'] for i in answers]
            subject = answers[0]['m.name']
            final_answer = '{0}通常可以通过以下方式检查出来:{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        elif question_type == 'check_disease':
            desc = [i['m.name'] for i in answers]
            subject = answers[0]['n.name']
            final_answer = '通常可以通过{0}检查出来的疾病有{1}'.format(subject, ';'.join(list(set(desc))[:self.num_limit]))
        # Unknown question types fall through and return the initial [].
        return final_answer


if __name__ == '__main__':
    searcher = AnswerSearcher()
[ "zhoujx@gzsendi.cn" ]
zhoujx@gzsendi.cn
169d1b34052601f7372457060040c76fbb71fe6b
498d65615aeba1f7399344a32a23514e057fb30e
/decode_verify_jwt.py
224caf0f4e6b9ae7531dc23017880f0ac6b66eee
[]
no_license
gautamamber/Blog-Serverless-chalice
54fd128f76a3e918a170225bb49ded0874089a61
e1735c5bb617bdb9720b5ecf847ea32833d7e5bc
refs/heads/master
2020-08-04T15:17:44.405145
2019-10-02T14:33:32
2019-10-02T14:33:32
212,181,532
1
0
null
null
null
null
UTF-8
Python
false
false
1,896
py
from urllib.request import urlopen
import json
import os
import time
from constant import Constants
from jose import jwk, jwt
from jose.utils import base64url_decode

region = 'us-west-2'
userpool_id = Constants.COGNITO_POOL_ID
app_client_id = Constants.COGNITO_CLIENT
keys_url = 'https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json'.format(region, userpool_id)

# The JWKS download is done at import time on purpose: Lambda containers are
# reused, so warm invocations skip the network round-trip.
# https://aws.amazon.com/blogs/compute/container-reuse-in-lambda/
response = urlopen(keys_url)
keys = json.loads(response.read())['keys']


def token_verification(token):
    """Verify a Cognito JWT: signature, expiry and audience.

    Returns the claims dict when every check passes, ``False`` otherwise.
    """
    # The (still unverified) header names the key that signed the token.
    kid = jwt.get_unverified_headers(token)['kid']
    signing_key = None
    for candidate in keys:
        if candidate['kid'] == kid:
            signing_key = candidate
            break
    if signing_key is None:
        # kid not present in the downloaded public keys.
        return False
    public_key = jwk.construct(signing_key)

    # Split "<header>.<payload>" from the base64url signature and verify it.
    message, encoded_signature = str(token).rsplit('.', 1)
    decoded_signature = base64url_decode(encoded_signature.encode('utf-8'))
    if not public_key.verify(message.encode("utf8"), decoded_signature):
        return False

    # Signature checked out, so the claims can now be trusted.
    claims = jwt.get_unverified_claims(token)
    if time.time() > claims['exp']:
        # Token has expired.
        return False
    # Audience check (use claims['client_id'] if verifying an access token).
    if claims['aud'] != app_client_id:
        return False
    return claims
[ "ambergautam1@gmail.com" ]
ambergautam1@gmail.com
14c96d4d72ae9dfe1358798816a363a197cb1919
f8c6ae3401bba7521019a91dec17984a1500670c
/models/imagenet/resnet.py
0f1e99f68b6cca2af2cf00e9498ecc7716324d78
[ "MIT" ]
permissive
rs9899/Parsing-R-CNN
a9b10d164ff632d510734156fdcc003b799899f2
a0c9ed8850abe740eedf8bfc6e1577cc0aa3fc7b
refs/heads/master
2022-11-18T01:57:53.461316
2020-07-20T11:14:58
2020-07-20T11:14:58
277,079,950
0
0
MIT
2020-07-20T10:38:16
2020-07-04T09:34:04
null
UTF-8
Python
false
false
15,079
py
"""
Creates a ResNet Model as defined in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. (2015 CVPR).
Deep Residual Learning for Image Recognition.
Copyright (c) Yang Lu, 2017
"""
import torch
import torch.nn as nn

import models.ops as ops
from utils.net import make_norm


class BasicBlock(nn.Module):
    """Two 3x3 convs with a residual connection (ResNet-18/34 style).

    conv selects the conv operator ('normal' / 'deform' / 'deformv2');
    context optionally appends an SE or GCB attention module.
    """
    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, base_width=64, stride=1, dilation=1, norm='bn', conv='normal',
                 context='none', ctx_ratio=0.0625, stride_3x3=False, downsample=None):
        # NOTE(review): stride_3x3 appears unused here; presumably accepted
        # only for signature parity with the bottleneck blocks — confirm.
        super(BasicBlock, self).__init__()
        if conv == 'normal':
            conv_op = nn.Conv2d
        elif conv == 'deform':
            conv_op = ops.DeformConvPack
        elif conv == 'deformv2':
            conv_op = ops.ModulatedDeformConvPack
        else:
            raise ValueError('{} type conv operation is not supported.'.format(conv))
        assert context in ['none', 'se', 'gcb']

        # Channel width scales with base_width (64 keeps width == planes).
        width = int(planes * (base_width / 64.))
        self.conv1 = conv_op(inplanes, width, kernel_size=3, stride=stride, dilation=dilation, padding=dilation,
                             bias=False)
        self.bn1 = make_norm(width, norm=norm, an_k=10 if planes < 256 else 20)
        self.conv2 = conv_op(width, width, kernel_size=3, stride=1, dilation=dilation, padding=dilation, bias=False)
        self.bn2 = make_norm(width, norm=norm, an_k=10 if planes < 256 else 20)

        if context == 'none':
            self.ctx = None
        elif context == 'se':
            self.ctx = ops.SeConv2d(width, int(width * ctx_ratio))
        elif context == 'gcb':
            self.ctx = ops.GlobalContextBlock(width, int(width * ctx_ratio))
        else:
            raise ValueError('{} type context operation is not supported.'.format(context))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        if self.ctx is not None:
            out = self.ctx(out)

        # downsample matches the residual's shape when stride/channels change.
        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck (ResNet-50/101/152 style)."""
    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, base_width=64, stride=1, dilation=1, norm='bn', conv='normal',
                 context='none', ctx_ratio=0.0625, stride_3x3=False, downsample=None):
        super(Bottleneck, self).__init__()
        if conv == 'normal':
            conv_op = nn.Conv2d
        elif conv == 'deform':
            conv_op = ops.DeformConvPack
        elif conv == 'deformv2':
            conv_op = ops.ModulatedDeformConvPack
        else:
            raise ValueError('{} type conv operation is not supported.'.format(conv))
        # stride_3x3 puts the stride on the 3x3 conv (ResNet-B/torchvision
        # style) instead of the first 1x1 conv (original Caffe style).
        (str1x1, str3x3) = (1, stride) if stride_3x3 else (stride, 1)

        width = int(planes * (base_width / 64.))
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, stride=str1x1, bias=False)
        self.bn1 = make_norm(width, norm=norm.split('_')[-1])
        self.conv2 = conv_op(width, width, kernel_size=3, stride=str3x3, dilation=dilation, padding=dilation,
                             bias=False)
        self.bn2 = make_norm(width, norm=norm, an_k=10 if planes < 256 else 20)
        self.conv3 = nn.Conv2d(width, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = make_norm(planes * self.expansion, norm=norm.split('_')[-1])

        if context == 'none':
            self.ctx = None
        elif context == 'se':
            self.ctx = ops.SeConv2d(planes * self.expansion, int(planes * self.expansion * ctx_ratio))
        elif context == 'gcb':
            self.ctx = ops.GlobalContextBlock(planes * self.expansion, int(planes * self.expansion * ctx_ratio))
        elif context == 'nonlocal':
            self.ctx = ops.NonLocal2d(planes * self.expansion, int(planes * self.expansion * ctx_ratio),
                                      planes * self.expansion, use_gn=True)
        elif context == 'msa':
            self.ctx = ops.MS_NonLocal2d(planes * self.expansion, int(planes * self.expansion * ctx_ratio),
                                         planes * self.expansion, use_gn=True)
        else:
            raise ValueError('{} type context operation is not supported.'.format(context))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)
        if self.ctx is not None:
            out = self.ctx(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class AlignedBottleneck(nn.Module):
    """Two-branch bottleneck whose branches (1x1->3x3 and 1x1->3x3->3x3)
    are concatenated before the final 1x1 projection."""
    expansion = 4

    def __init__(self, inplanes, planes, base_width=64, stride=1, dilation=1, norm='bn', conv='normal',
                 context='none', ctx_ratio=0.0625, stride_3x3=False, downsample=None):
        # NOTE(review): stride_3x3 appears unused here as well — confirm.
        super(AlignedBottleneck, self).__init__()
        if conv == 'normal':
            conv_op = nn.Conv2d
        elif conv == 'deform':
            conv_op = ops.DeformConvPack
        elif conv == 'deformv2':
            conv_op = ops.ModulatedDeformConvPack
        else:
            raise ValueError('{} type conv operation is not supported.'.format(conv))

        width = int(planes * (base_width / 64.))
        # Branch 1: 1x1 reduce -> 3x3 (carries the stride).
        self.conv1_1 = nn.Conv2d(inplanes, width, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1_1 = make_norm(width, norm=norm.split('_')[-1])
        self.conv1_2 = conv_op(width, width, kernel_size=3, stride=stride, dilation=dilation, padding=dilation,
                               bias=False)
        # Branch 2: half-width 1x1 reduce -> two 3x3 convs.
        self.conv2_1 = nn.Conv2d(inplanes, width // 2, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2_1 = make_norm(width // 2, norm=norm.split('_')[-1])
        self.conv2_2 = conv_op(width // 2, width // 2, kernel_size=3, stride=stride, dilation=dilation,
                               padding=dilation, bias=False)
        self.bn2_2 = make_norm(width // 2, norm=norm, an_k=10 if planes < 256 else 20)
        self.conv2_3 = conv_op(width // 2, width // 2, kernel_size=3, stride=1, dilation=dilation, padding=dilation,
                               bias=False)
        # Fuse the concatenated branches and project to the output width.
        self.bn_concat = make_norm(width + (width // 2), norm=norm, an_k=10 if planes < 256 else 20)
        self.conv = nn.Conv2d(width + (width // 2), planes * self.expansion, kernel_size=1, stride=1, padding=0,
                              bias=False)
        self.bn = make_norm(planes * self.expansion, norm=norm.split('_')[-1])

        if context == 'none':
            self.ctx = None
        elif context == 'se':
            self.ctx = ops.SeConv2d(planes * self.expansion, int(planes * self.expansion * ctx_ratio))
        elif context == 'gcb':
            self.ctx = ops.GlobalContextBlock(planes * self.expansion, int(planes * self.expansion * ctx_ratio))
        else:
            raise ValueError('{} type context operation is not supported.'.format(context))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        residual = x

        branch1 = self.conv1_1(x)
        branch1 = self.bn1_1(branch1)
        branch1 = self.relu(branch1)
        branch1 = self.conv1_2(branch1)

        branch2 = self.conv2_1(x)
        branch2 = self.bn2_1(branch2)
        branch2 = self.relu(branch2)
        branch2 = self.conv2_2(branch2)
        branch2 = self.bn2_2(branch2)
        branch2 = self.relu(branch2)
        branch2 = self.conv2_3(branch2)

        out = torch.cat((branch1, branch2), 1)
        out = self.bn_concat(out)
        out = self.relu(out)

        out = self.conv(out)
        out = self.bn(out)
        if self.ctx is not None:
            out = self.ctx(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):
    def __init__(self, bottleneck=True, aligned=False, use_3x3x3stem=False, stride_3x3=False, avg_down=False,
                 base_width=64, layers=(3, 4, 6, 3), norm='bn',
                 stage_with_conv=('normal', 'normal', 'normal', 'normal'),
                 stage_with_context=('none', 'none', 'none', 'none'), ctx_ratio=16, num_classes=1000):
        """ Constructor

        Args:
            layers: config of layers, e.g., (3, 4, 23, 3)
            num_classes: number of classes

        bottleneck/aligned select the residual block type; use_3x3x3stem
        replaces the 7x7 stem with three 3x3 convs; avg_down uses
        AvgPool+1x1 for the shortcut downsample; stage_with_conv /
        stage_with_context configure each of the four stages.
        NOTE(review): the ctx_ratio default of 16 differs from the blocks'
        0.0625 (=1/16) default and is forwarded verbatim — confirm callers
        pass a fraction here.
        """
        super(ResNet, self).__init__()
        if aligned:
            block = AlignedBottleneck
        else:
            if bottleneck:
                block = Bottleneck
            else:
                block = BasicBlock
        self.expansion = block.expansion
        self.stride_3x3 = stride_3x3
        self.avg_down = avg_down
        self.base_width = base_width
        self.norm = norm
        self.ctx_ratio = ctx_ratio

        self.inplanes = 64
        self.use_3x3x3stem = use_3x3x3stem
        if not self.use_3x3x3stem:
            # Classic ResNet stem: 7x7 stride-2 conv.
            self.conv1 = nn.Conv2d(3, self.inplanes, 7, 2, 3, bias=False)
            self.bn1 = make_norm(self.inplanes, norm=self.norm.split('_')[-1])
        else:
            # ResNet-C stem: three stacked 3x3 convs.
            self.conv1 = nn.Conv2d(3, self.inplanes // 2, 3, 2, 1, bias=False)
            self.bn1 = make_norm(self.inplanes // 2, norm=self.norm.split('_')[-1])
            self.conv2 = nn.Conv2d(self.inplanes // 2, self.inplanes // 2, 3, 1, 1, bias=False)
            self.bn2 = make_norm(self.inplanes // 2, norm=self.norm.split('_')[-1])
            self.conv3 = nn.Conv2d(self.inplanes // 2, self.inplanes, 3, 1, 1, bias=False)
            self.bn3 = make_norm(self.inplanes, norm=self.norm.split('_')[-1])
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0], 1, conv=stage_with_conv[0], context=stage_with_context[0])
        self.layer2 = self._make_layer(block, 128, layers[1], 2, conv=stage_with_conv[1], context=stage_with_context[1])
        self.layer3 = self._make_layer(block, 256, layers[2], 2, conv=stage_with_conv[2], context=stage_with_context[2])
        self.layer4 = self._make_layer(block, 512, layers[3], 2, conv=stage_with_conv[3], context=stage_with_context[3])

        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * self.expansion, num_classes)

        self._init_weights()

    @property
    def stage_out_dim(self):
        # Channel count after the stem and after each of the four stages.
        return [64, 64 * self.expansion, 128 * self.expansion, 256 * self.expansion, 512 * self.expansion]

    @property
    def stage_out_spatial(self):
        # Spatial scale relative to the input at the same five points.
        return [1 / 2., 1 / 4., 1 / 8., 1 / 16., 1 / 32.]

    def _init_weights(self):
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                # Mixture norms manage their own parameters.
                if not isinstance(m, (ops.MixtureBatchNorm2d, ops.MixtureGroupNorm)):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.0001)
                nn.init.constant_(m.bias, 0)
        # zero init deform conv offset
        for m in self.modules():
            if isinstance(m, ops.DeformConvPack):
                nn.init.constant_(m.conv_offset.weight, 0)
                nn.init.constant_(m.conv_offset.bias, 0)
            if isinstance(m, ops.ModulatedDeformConvPack):
                nn.init.constant_(m.conv_offset_mask.weight, 0)
                nn.init.constant_(m.conv_offset_mask.bias, 0)
        # zero gamma for last bn of each block
        for m in self.modules():
            if isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)
            elif isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, AlignedBottleneck):
                nn.init.constant_(m.bn.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, conv='normal', context='none'):
        """ Stack n bottleneck modules where n is inferred from the depth of the network.

        Args:
            block: block type used to construct ResNet
            planes: number of output channels (need to multiply by block.expansion)
            blocks: number of blocks to be built
            stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.

        Returns: a Module consisting of n sequential bottlenecks.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Shortcut projection when the residual shape changes.
            if self.avg_down:
                # ResNet-D style: average-pool then 1x1 conv.
                downsample = nn.Sequential(
                    nn.AvgPool2d(kernel_size=stride, stride=stride),
                    nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=1, bias=False),
                    make_norm(planes * block.expansion, norm=self.norm.split('_')[-1]),
                )
            else:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                    make_norm(planes * block.expansion, norm=self.norm.split('_')[-1]),
                )

        layers = []
        # First block carries the stride and the downsample shortcut.
        layers.append(block(self.inplanes, planes, self.base_width, stride, dilation, self.norm, conv, context,
                            self.ctx_ratio, self.stride_3x3, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, self.base_width, 1, dilation, self.norm, conv, context,
                                self.ctx_ratio, self.stride_3x3))

        return nn.Sequential(*layers)

    def forward(self, x):
        # Stem.
        if not self.use_3x3x3stem:
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
        else:
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.conv2(x)
            x = self.bn2(x)
            x = self.relu(x)
            x = self.conv3(x)
            x = self.bn3(x)
            x = self.relu(x)
        x = self.maxpool(x)

        # Four residual stages.
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # Global average pool + classifier head.
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
[ "priv@bupt.edu.cn" ]
priv@bupt.edu.cn
26d7c06f88ff8b77fb6eb704335b28197ac7b3ac
49c2492d91789b3c2def7d654a7396e8c6ce6d9f
/ROS/vrep_ros_ws/build/vrep_skeleton_msg_and_srv/catkin_generated/generate_cached_setup.py
063f8efb2e5b20ed1335dd677a45fae2675a3513
[]
no_license
DavidHan008/lockdpwn
edd571165f9188e0ee93da7222c0155abb427927
5078a1b08916b84c5c3723fc61a1964d7fb9ae20
refs/heads/master
2021-01-23T14:10:53.209406
2017-09-02T18:02:50
2017-09-02T18:02:50
102,670,531
0
2
null
2017-09-07T00:11:33
2017-09-07T00:11:33
null
UTF-8
Python
false
false
1,508
py
# -*- coding: utf-8 -*- from __future__ import print_function import argparse import os import stat import sys # find the import for catkin's python package - either from source space or from an installed underlay if os.path.exists(os.path.join('/opt/ros/indigo/share/catkin/cmake', 'catkinConfig.cmake.in')): sys.path.insert(0, os.path.join('/opt/ros/indigo/share/catkin/cmake', '..', 'python')) try: from catkin.environment_cache import generate_environment_script except ImportError: # search for catkin package in all workspaces and prepend to path for workspace in "/home/dyros-vehicle/gitrepo/lockdpwn/ROS/vrep_ros_ws/devel;/home/dyros-vehicle/gitrepo/lockdpwn/ROS/catkin_ws/devel;/opt/ros/indigo".split(';'): python_path = os.path.join(workspace, 'lib/python2.7/dist-packages') if os.path.isdir(os.path.join(python_path, 'catkin')): sys.path.insert(0, python_path) break from catkin.environment_cache import generate_environment_script code = generate_environment_script('/home/dyros-vehicle/gitrepo/lockdpwn/ROS/vrep_ros_ws/devel/.private/vrep_skeleton_msg_and_srv/env.sh') output_filename = '/home/dyros-vehicle/gitrepo/lockdpwn/ROS/vrep_ros_ws/build/vrep_skeleton_msg_and_srv/catkin_generated/setup_cached.sh' with open(output_filename, 'w') as f: #print('Generate script for cached setup "%s"' % output_filename) f.write('\n'.join(code)) mode = os.stat(output_filename).st_mode os.chmod(output_filename, mode | stat.S_IXUSR)
[ "gyurse@gmail.com" ]
gyurse@gmail.com
94ebb85f7e1d639046a1fbace4b86ac3803e0d57
1118d761c307a6861e3469176a12a8cf7f9d440a
/forms.py
bc1e8eecf491633f3d5fc8f4cdf42518937b4847
[]
no_license
Bendricks40/FlaskSocialNetwork
be2fac6a3fdb95c3870d0f6b9ebe6a0c74c6942a
7ed8b197132d8127c533a0ea08bce42cfb0ce003
refs/heads/master
2020-08-05T14:02:40.798267
2019-10-03T12:56:56
2019-10-03T12:56:56
205,247,338
0
0
null
null
null
null
UTF-8
Python
false
false
1,779
py
from flask_wtf import Form
from wtforms import StringField, PasswordField, TextAreaField
from wtforms.validators import (DataRequired, Regexp, ValidationError,
                                Email, Length, EqualTo)

from models import User


def name_exists(form, field):
    """WTForms validator: reject usernames already present in the DB."""
    if User.select().where(User.username == field.data).exists():
        raise ValidationError('User with that name already exists')


def email_exists(form, field):
    """WTForms validator: reject e-mail addresses already present in the DB."""
    if User.select().where(User.email == field.data).exists():
        raise ValidationError('User with that email already exists')


class RegisterForm(Form):
    """Sign-up form: unique username/e-mail plus a confirmed password."""
    username = StringField(
        'Username',
        validators=[
            DataRequired(),
            Regexp(
                r'^[a-zA-Z0-9_]+$',
                # NOTE(review): the two adjacent literals concatenate without
                # a space ("letters,numbers") — confirm the intended wording.
                message=("Username should be one word, letters,"
                         "numbers and underscores only.")
            ),
            name_exists
        ])
    email = StringField(
        'Email',
        validators=[
            DataRequired(),
            Email(),
            email_exists
        ])
    password = PasswordField('Password', validators=[
        DataRequired(),
        Length(min=2),
        # Must match the confirmation field below.
        EqualTo('password2', message = 'Passwords must match!')
    ])
    password2 = PasswordField('Confirm password', validators=[
        DataRequired()
    ])


class LoginForm(Form):
    """Login form: e-mail + password."""
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])


class PostForm(Form):
    """Form for publishing a status post."""
    content = TextAreaField("What's up?", validators=[DataRequired()])
[ "bendricks40@gmail.com" ]
bendricks40@gmail.com
c867bd9e02a9a83c26a20ce87ccafeab9d03df8d
e1ee812a873da2f1aeba893f36d842ea4a8ea4b4
/test.py
6113e3f2d7725b4d4d394bbcecda68a492788ac8
[ "MIT" ]
permissive
CanDenizKas/FizzBuzz-Game
26125c018b56c7bf571e1e43deea88f286215429
d1198d602bce8393c991867bdd1b011aba6ad785
refs/heads/master
2023-03-04T05:46:47.267568
2021-02-06T12:27:34
2021-02-06T12:27:34
335,057,636
0
0
null
null
null
null
UTF-8
Python
false
false
226
py
def fizzbuzz(i):
    """Return the FizzBuzz word for *i* (or *i* itself as a string).

    BUG FIX: the original printed "Buzz" for multiples of 3, "FizzBuzz"
    for multiples of 5 and "Fizz" for multiples of 15 — the labels were
    scrambled relative to the standard game (3 -> Fizz, 5 -> Buzz,
    15 -> FizzBuzz).
    """
    # The 15 test must come first, otherwise the 3/5 branches shadow it.
    if i % 15 == 0:
        return "FizzBuzz"
    if i % 3 == 0:
        return "Fizz"
    if i % 5 == 0:
        return "Buzz"
    return str(i)


if __name__ == "__main__":
    # Play the game for 1..100, one line per number, as the original did.
    for i in range(1, 101):
        print(fizzbuzz(i))
[ "candenizkas@gmail.com" ]
candenizkas@gmail.com
1a13ca652346bb8acc24d400245201aa9439116b
af0d4f18b32cfc8f5d492eb0f8def373da318571
/review.py
1e1455183d8db667010f15639861908f17fd9d0c
[]
no_license
chbae/gitlab-review
e254beb61a646c031143332d2466227eace989c4
107fcfe5013695bf369642b698491d6b7c89a514
refs/heads/master
2020-08-11T02:10:22.792614
2019-10-11T15:16:43
2019-10-14T11:56:09
214,468,845
0
0
null
null
null
null
UTF-8
Python
false
false
1,833
py
#!/usr/bin/env python
import sys
import json
import gitlab


def main(uri):
    """Pretty-print a GitLab merge request and each of its commits/diffs.

    uri: the MR's web URL, e.g.
         https://host/<group>/<project>/merge_requests/<iid>
         (split('/') indices 3, 4 and 6 extract group, project and iid).
    """
    gl = gitlab.Gitlab.from_config('swfactory', ['/Users/chanbae/gitlab/gitlab.cfg'])
    try:
        gl.auth()
    except Exception as ex:
        # NOTE(review): auth failure is only reported, then execution
        # continues — subsequent API calls will fail; confirm intent.
        print('Error: ', ex)

    url = uri.split('/')
    prj = gl.projects.get(url[3]+'/'+url[4])
    mr = prj.mergerequests.get(url[6])
    # MR header (ANSI colour codes: magenta/blue labels, white values).
    print("\033[1;35;40m============================================== MR ==================================================\033[1;37;40m")
    print("\033[1;34;40mFrom (%s): \033[1;37;40m%s/%s/tree/%s" % (
        mr.attributes['author']['name'], mr.attributes['author']['web_url'],
        url[4], mr.attributes['source_branch']))
    print("\033[1;34;40mTitle:\033[1;37;40m %s" % (
        mr.attributes['title']))
    commits = mr.commits()
    count = 0
    print("\033[1;35;40m============================================= COMMITS ===============================================\033[1;37;40m")
    for commit in commits:
        print("\033[1;35;40m[%d] START -------------------------------------------------------------------------------------------\033[1;37;40m" % count)
        print("\033[1;34;40mcommit title (%d): \033[1;37;40m%s" % (len(commit.title), commit.title))
        print("\033[1;34;40mcommit id: \033[1;37;40m%s" % (commit.id))
        print("\033[1;34;40mcommit message:\033[1;37;40m\n%s" % (commit.message))
        diff = commit.diff()
        # NOTE(review): only the first diff entry of each commit is shown.
        print("\033[1;34;40mcommit diff:\033[1;37;40m\n%s" % (diff[0]['diff']))
        print("\033[1;35;40m[%d] END ---------------------------------------------------------------------------------------------\033[1;37;40m" % count)
        print("\n")
        count = count + 1


if __name__ == '__main__':
    if len(sys.argv) == 2:
        main(sys.argv[1])
    else:
        # BUG FIX: this was a Python-2 print *statement*
        # (`print "Please add url"`) — a SyntaxError under Python 3 and
        # inconsistent with the print() calls used everywhere else.
        print("Please add url")
[ "changhyeok.bae@daimler.com" ]
changhyeok.bae@daimler.com
20a4283830b333bc30651ce8b8c49605032f23df
cb20aa7ca2d807d038d5e3c536a768f160d517f1
/remove_motif.py
168758d18d3760d7f109a9baaf9d68da5369d2d5
[]
no_license
mathii/spectrum
dc393e3e1e425183112f7fc2a814fd3cd416936a
e9f0f990e8c0e5aac598886109bc5e89b15c3bc0
refs/heads/master
2021-01-10T07:15:47.871805
2020-06-02T15:02:15
2020-06-02T15:02:15
54,673,436
7
1
null
null
null
null
UTF-8
Python
false
false
1,021
py
# Read a fasta file, and remove a motif (by setting it to N).
# Side effect of making everything uppercase.
from __future__ import division, print_function
import argparse, re, sys

motif = "CC[A-Z]CC[A-Z]T[A-Z][A-Z]CC[A-Z]C"
# Kept for backward compatibility; masking now measures each actual match.
motif_length = 13

# Compiled once at module level instead of per-call.
reg = re.compile(motif)

################################################################################

def parse_options():
    """argparse: -r/--ref names the reference fasta file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--ref', type=str, default="", help="reference fasta")
    return parser.parse_args()

################################################################################

def mask_motif(seq):
    """Uppercase *seq* and replace every motif occurrence with Ns.

    Generalisation: the replacement length is taken from the actual match
    (len(m.group(0))) rather than the hard-coded ``motif_length``, so the
    code stays correct if ``motif`` is ever changed to a variable-length
    pattern.
    """
    return reg.sub(lambda m: "N" * len(m.group(0)), seq.upper())

################################################################################

def main(options):
    """Iterate over chromosomes and print the motif-masked fasta to stdout."""
    # Imported here so the pure-stdlib helpers above stay importable
    # without pyfaidx being installed.
    from pyfaidx import Fasta
    ref = Fasta(options.ref)
    for chrom in ref.keys():
        print(">" + chrom)
        print(mask_motif(ref[chrom][:].seq))

################################################################################

if __name__ == "__main__":
    # Guard added so importing this module no longer parses sys.argv.
    main(parse_options())
[ "iain_mathieson@hms.harvard.edu" ]
iain_mathieson@hms.harvard.edu
84234c762e51c88b858dc53f8d5fe2e9486ed289
897bd4c42d285478ba00712d746712c91bae4d6c
/elgraiv_fbx_export_utils/properties.py
6b56f61a7bf1d2ec8edcddc2f859c92b5aed47ad
[]
no_license
elgraiv-take/BlenderAddons
5721dfd9bf941d26b0300ba332b8a672f5ef8b1b
d413abd1efb9bd0c4fcbba681f47da8021bb48b6
refs/heads/master
2021-07-01T04:11:28.051329
2020-12-01T17:42:02
2020-12-01T17:42:02
201,735,526
0
0
null
null
null
null
UTF-8
Python
false
false
614
py
'''
Created on 2019/08/11

@author: take
'''
import bpy


class FbxExportItem(bpy.types.PropertyGroup):
    """One exportable object inside an FBX export set."""
    # Pointer to the Blender object to be exported.
    item: bpy.props.PointerProperty(type=bpy.types.Object)


class FbxExportSet(bpy.types.PropertyGroup):
    """A named FBX export preset: output path, options and object list."""
    # Display name of the export set.
    name: bpy.props.StringProperty()
    # Destination path for the exported .fbx file.
    save_path: bpy.props.StringProperty()
    # Index of the currently selected entry in object_list (for UI lists).
    active_object_index: bpy.props.IntProperty()
    # Whether animation data is included in the export.
    export_anim: bpy.props.BoolProperty()
    # Objects belonging to this export set.
    object_list: bpy.props.CollectionProperty(type=FbxExportItem)


class FbxExportSetProperty(bpy.types.PropertyGroup):
    """Top-level container holding all export sets."""
    # Index of the currently selected export set (for UI lists).
    active_index: bpy.props.IntProperty()
    # All configured export sets.
    export_set_list: bpy.props.CollectionProperty(type=FbxExportSet)
[ "4583273+elgraiv-take@users.noreply.github.com" ]
4583273+elgraiv-take@users.noreply.github.com
9121aa7623fa31fd8cad9ac6cd3485cb1656a44d
a36501f44a09ca03dd1167e1d7965f782e159097
/app/modules/auth/params.py
c7dd1d359b51eb056962e44c9b871c1d299d8c4b
[ "Apache-2.0" ]
permissive
ssfdust/full-stack-flask-smorest
9429a2cdcaa3ff3538875cc74cff802765678d4b
4f866b2264e224389c99bbbdb4521f4b0799b2a3
refs/heads/master
2023-08-05T08:48:03.474042
2023-05-07T01:08:20
2023-05-07T01:08:20
205,528,296
39
10
Apache-2.0
2023-08-31T00:18:42
2019-08-31T10:12:25
Python
UTF-8
Python
false
false
2,162
py
# Copyright 2019 RedLotus <ssfdust@gmail.com> # Author: RedLotus <ssfdust@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ app.modules.auth.params ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 用户验证参数模块 """ from app.extensions import ma from marshmallow import fields class LoginParams(ma.Schema): """ 登录用参数 :attr email: str 用户邮箱 :attr password: str 密码 :attr captcha: str 验证码 :attr token: str 验证码token """ email = fields.Str(required=True, allow_none=False, description="用户邮箱") password = fields.Str(required=True, allow_none=False, description="密码") captcha = fields.Str(required=True, allow_none=False, description="验证码") token = fields.Str(required=True, allow_none=False, description="验证token") class JwtParam(ma.Schema): """ Jwt的Token参数 :attr token: str Jwt token """ token = fields.Str(required=False, allow_none=False, description="token") class PasswdParam(ma.Schema): """ 验证密码 :attr password: str 原密码 :attr confirm_password: str 确认密码 """ password = fields.Str(required=True, allow_none=False, description="密码") confirm_password = fields.Str(required=True, allow_none=False, description="确认密码") class EmailParam(ma.Schema): """ 邮箱参数 :attr email: str 邮箱 """ email = fields.Str(required=True, description="邮箱") class CaptchaParam(ma.Schema): """ 验证图片Token参数 :attr token: str 验证码token """ token = fields.Str(required=True, description="随机token")
[ "ssfdust@gmail.com" ]
ssfdust@gmail.com
b321289a8645a6f9e9af0dad8ea7b4e5903efac1
3980ffc200867875e2dffaef5551397e99a434b0
/PythonSem1/lista10.py
a8dd236a2faff6a4e8ac94a7bf2ed6a9f4fc78fd
[]
no_license
wiphand/git-repo
10ff056d7f6048246c628a28360b2a85e36f9c3d
03599508506e383e36d70c890439f53d3b1b3a72
refs/heads/master
2021-04-18T21:31:20.282174
2018-06-04T00:26:03
2018-06-04T00:26:03
126,590,890
0
0
null
null
null
null
UTF-8
Python
false
false
620
py
s="send + more"# = money" cyfry=[9,8,7,6,5,4,3,2,1,0] dictMain={} def analyze(s): functions=[] s=list(s) iter=0 for x in s: if(x.isalpha()): dictMain[x] = dictMain.setdefault(x,cyfry.pop()) elif(x!=" "): functions+=x print(dictMain,functions) return def evalu(String): value=0 String = list(String) String = String[::-1] print(String) values = [dictMain.setdefault(x,0) for x in String] print(values) temp=1 for x in values: value+=int(x)*temp temp*=10 return value analyze(s) print(evalu("bat"))
[ "piotrus.kuczynski@gmail.com" ]
piotrus.kuczynski@gmail.com
f135349869cce6877593dc177603adef88a8dd07
8eb2e7d0b82e26b8999c1e2f14b4fe0f7dfeab65
/scripts/run_slim_bpr_cython_baesyan.py
8262e9aefd632f8690b346aca92562dd0f270d73
[ "Apache-2.0" ]
permissive
edervishaj/spotify-recsys-challenge
c8d66cec51495bef85809dbbff183705e53a7bd4
4077201ac7e4ed9da433bd10a92c183614182437
refs/heads/master
2021-06-28T14:59:02.619439
2020-10-03T09:53:50
2020-10-03T09:53:50
150,008,507
0
0
Apache-2.0
2020-10-03T09:53:51
2018-09-23T17:31:20
Jupyter Notebook
UTF-8
Python
false
false
7,642
py
from personal.MaurizioFramework.ParameterTuning.BayesianSearch import BayesianSearch from personal.MaurizioFramework.ParameterTuning.AbstractClassSearch import DictionaryKeys from utils.definitions import ROOT_DIR import pickle from personal.MaurizioFramework.SLIM_BPR.Cython.SLIM_BPR_Cython import SLIM_BPR_Cython from recommenders.similarity.dot_product import dot_product from utils.datareader import Datareader from utils.evaluator import Evaluator from utils.bot import Bot_v1 from tqdm import tqdm import scipy.sparse as sps import numpy as np import sys def run_SLIM_bananesyan_search(URM_train, URM_validation, logFilePath = ROOT_DIR+"/results/logs_baysian/"): recommender_class = SLIM_BPR_Cython bananesyan_search = BayesianSearch(recommender_class, URM_validation=URM_validation, evaluation_function=evaluateRecommendationsSpotify_BAYSIAN) hyperparamethers_range_dictionary = {} hyperparamethers_range_dictionary["topK"] = [100, 150, 200, 250, 300, 350, 400, 500] hyperparamethers_range_dictionary["lambda_i"] = [1e-7,1e-6,1e-5,1e-4,1e-3,0.001,0.01,0.05,0.1] hyperparamethers_range_dictionary["lambda_j"] = [1e-7,1e-6,1e-5,1e-4,1e-3,0.001,0.01,0.05,0.1] hyperparamethers_range_dictionary["learning_rate"] = [0.1,0.01,0.001,0.0001,0.00005,0.000001, 0.0000001] hyperparamethers_range_dictionary["minRatingsPerUser"] = [0, 5, 50, 100] logFile = open(logFilePath + recommender_class.RECOMMENDER_NAME + "_BayesianSearch Results.txt", "a") recommenderDictionary = {DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [], DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: { "URM_train":URM_train, "positive_threshold":0, "URM_validation":URM_validation, "final_model_sparse_weights":True, "train_with_sparse_weights":True, "symmetric" : True}, DictionaryKeys.FIT_POSITIONAL_ARGS: dict(), DictionaryKeys.FIT_KEYWORD_ARGS: { "epochs" : 5, "beta_1" : 0.9, "beta_2" : 0.999, "validation_function": evaluateRecommendationsSpotify_RECOMMENDER, "stop_on_validation":True , "sgd_mode" : 'adam', "validation_metric" : 
"ndcg_t", "lower_validatons_allowed":3, "validation_every_n":1}, DictionaryKeys.FIT_RANGE_KEYWORD_ARGS: hyperparamethers_range_dictionary} best_parameters = bananesyan_search.search(recommenderDictionary, metric="ndcg_t", n_cases=200, output_root_path=""+logFilePath + recommender_class.RECOMMENDER_NAME, parallelPoolSize=4) logFile.write("best_parameters: {}".format(best_parameters)) logFile.flush() logFile.close() pickle.dump(best_parameters, open(logFilePath + recommender_class.RECOMMENDER_NAME + "_best_parameters", "wb"), protocol=pickle.HIGHEST_PROTOCOL) def evaluateRecommendationsSpotify_RECOMMENDER(recommender): """ THIS FUNCTION WORKS INSIDE THE RECOMMENDER :param self: :return: """ user_profile_batch = recommender.URM_train[pids_converted] eurm = dot_product(user_profile_batch, recommender.W_sparse, k=500).tocsr() recommendation_list = np.zeros((10000, 500)) for row in tqdm(range(eurm.shape[0]), desc="spotify rec list"): val = eurm[row].data ind = val.argsort()[-500:][::-1] ind = eurm[row].indices[ind] recommendation_list[row, 0:len(ind)] = ind prec_t, ndcg_t, clicks_t, prec_a, ndcg_a, clicks_a = ev.evaluate(recommendation_list=recommendation_list, name=recommender.configuration+"epoca"+ str(recommender.currentEpoch), return_overall_mean=True, verbose = False, show_plot=False, do_plot=True) results_run = {} results_run["prec_t"] = prec_t results_run["ndcg_t"] = ndcg_t results_run["clicks_t"] = clicks_t results_run["prec_a"] = prec_a results_run["ndcg_a"] = ndcg_a results_run["clicks_a"] = clicks_a return (results_run) def evaluateRecommendationsSpotify_BAYSIAN(recommender, URM_validation, paramether_dictionary) : """ THIS FUNCTION WORKS INSIDE THE BAYSIAN-GRID SEARCH :param self: :return: """ user_profile_batch = recommender.URM_train[pids_converted] eurm = dot_product(user_profile_batch, recommender.W_sparse, k=500).tocsr() recommendation_list = np.zeros((10000, 500)) for row in tqdm(range(eurm.shape[0]), desc="spotify rec list"): val = eurm[row].data ind = 
val.argsort()[-500:][::-1] ind = eurm[row].indices[ind] recommendation_list[row, 0:len(ind)] = ind prec_t, ndcg_t, clicks_t, prec_a, ndcg_a, clicks_a = ev.evaluate(recommendation_list=recommendation_list, name=recommender.configuration+"epoca"+str(recommender.currentEpoch), return_overall_mean=True, verbose= False, show_plot=False, do_plot=True) results_run = {} results_run["prec_t"] = prec_t results_run["ndcg_t"] = ndcg_t results_run["clicks_t"] = clicks_t results_run["prec_a"] = prec_a results_run["ndcg_a"] = ndcg_a results_run["clicks_a"] = clicks_a return (results_run) if __name__ == '__main__': bot = Bot_v1("keplero bananesyan slim") try: ######################SHRINKED dr = Datareader(mode="offline", train_format="100k", only_load=True) ev = Evaluator(dr) pids = dr.get_test_pids() urm, dictns, dict2 = dr.get_urm_shrinked() urm_evaluation = dr.get_evaluation_urm()[pids] pids_converted = np.array([dictns[x] for x in pids], dtype=np.int32) run_SLIM_bananesyan_search(URM_train=urm, URM_validation=urm_evaluation) # dr = Datareader(mode="offline", only_load=True) # ev = Evaluator(dr) # pids = dr.get_test_pids() # # urm = dr.get_urm() # urm_evaluation = dr.get_evaluation_urm()[pids] # pids_converted = pids # # run_SLIM_bananesyan_search(URM_train=urm, URM_validation=urm_evaluation) except Exception as e: bot.error("Exception "+str(e)) bot.end()
[ "scarlattitommaso@gmail.com" ]
scarlattitommaso@gmail.com
1a4b4f6a75b6fc31635917956efcfeab557469c3
c251ab19f18cb53ab04cbac49ee7d1028955e997
/PYSE_Lab2_part3.py
e0b6f355d5dbf59316d58eb607bed03f9afe99df
[]
no_license
MathiasGrun/ntnu
6fd748cc7a212398aaaae8895280c8429c8657dd
f7a862b1349faa778b4c99f1df315a4dd487bbfb
refs/heads/main
2023-01-02T12:57:55.328572
2020-10-23T16:21:32
2020-10-23T16:21:32
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,426
py
import simpy import numpy as np import random import matplotlib.pyplot as plt env = simpy.Environment() scheduled_interarrivals = [] actual_interarrivals = [] my_delay = 300 # Giving the delay a constant, this varies as we would like t_landing = 60 t_takeoff = 60 my_ta = 45*60 e_weather1 = 3600 e_weather2 = 7200 e_snow = 45*60 t_p = 60*10 t_i = 600 t_deicing = 600 runway = simpy.PriorityResource(env, capacity=2) # Implementing resources dTruck = simpy.Resource(env, capacity=1) def weather1(): return random.choice(np.random.exponential(e_weather1, 1000)) def weather2(): return random.choice(np.random.exponential(e_weather2, 1000)) def snow(): return random.choice(np.random.exponential(e_snow, 1000)) def badWeather(env, priority): with runway.request(env, priority) as req: snowing = random.choice([True, False]) if snowing: print("Started the system with snowy weather") while True: if snowing: t1 = env.now snowtime = weather1() while int(env.now - t1) < snowtime: timeToFillRunways = snow() if (env.now - t1) >= timeToFillRunways: print("Snow on runways. 
Start plowing at: ", end="") announceTime(env.now) yield req yield req yield env.timeout(t_p) yield runway.release(req) print("Finished plowing first runway at: ", end="") announceTime(env.now) yield env.timeout(t_p) yield runway.release(req) print("Finished plowing second runway at: ", end="") announceTime(env.now) break else: yield env.timeout(10) snowing = False else: un_snowtime = weather2() yield env.timeout(un_snowtime) print("Started snowing at: ", end="") announceTime(env.now) snowing = True def arrival_intensity(seconds): # This is a function that describes Table 1 in the Lab # A dictionary could also be used, but I found the "brute force" way simplest at the moment t = (seconds / 3600) % 24 # Gives time in integer hours if t > 24: return 0 elif 0< t < 5: return 0 elif 5 < t <= 8: return 120 elif 8 < t <= 11: return 30 elif 11 < t <= 15: return 150 elif 15 < t <= 20: return 30 elif 20 < t <= 24: return 120 else: return 0 def announceTime(time): day = int(time // (24 * 3600)) time = time % (24 * 3600) hour = int(time // 3600) time %= 3600 minutes = int(time // 60) time %= 60 seconds = int(time) print("Day:", day, ",", "Time: ", hour, ":", minutes, ":", seconds) def delay(): x = random.randint(0, 1) if x == 0: return 0 else: return random.choice(np.random.gamma(3, my_delay/3, 1000)) def turnaround(): return random.choice(np.random.gamma(7,my_ta/7, 1)) def inter_arrival(t_guard, t): if 0 < ((t / 3600) % 24) < 5: return (5 * 3600) - (t % 86400) t1 = random.choice(np.random.exponential(arrival_intensity(t), 1000)) if t_guard > t1: return t_guard else: return t1 def plane_generator(env, t_guard): id = 1 while True: print("\n") t_interarrival = inter_arrival(t_guard, env.now) scheduled_interarrivals.append( t_interarrival) # This simply adds the larger of the values T_guard and a random selected value from the distribution of intensities yield env.timeout( t_interarrival) # We need to "hold" the plane for the larger of t_guard and expected interarrival delay1 = 
delay() # To se if planes are previously delayed yield env.timeout(delay1) # Holding the plane for the time it is delayed env.process(airPlane(env, id, 2)) """if delay1 == 0: print("Plane number ", id, " arriving with no previous delay") else: print("Plane ", id, " arriving with delay: ", int(delay1/3600),"hrs, ",int(delay1/60),"minutes")""" id += 1 def airPlane(env, id, priority): with runway.request(env, priority) as req: print("Plane", id, "Requesting runway for landing at: ", end="") announceTime(env.now) yield req yield env.timeout(t_landing) print(id, " landed successfully at ", end=" ") announceTime(env.now) actual_interarrivals.append(int((env.now/3600)%24)) runway.release(req) priority=3 print(id, " initiating turn-around at: ", end="") announceTime(env.now) t_ta = turnaround() yield env.timeout(t_ta) print(id, " requesting runway for take-off at: ", end=" ") announceTime(env.now) t1 = int(env.now) yield req t2 = int(env.now) if t1 != t2: print("No runways ready when requested.") print("Had to wait ", int(t2-t1), " seconds") #yield dTruck.request(env) print("Ops, ", id, " need deicing. Starting deicing now.") yield env.timeout(t_deicing) #dTruck.release(env) print(id, "Finished deicing. 
Initiating take-off at: ", end=" ") announceTime(env.now) yield env.timeout(t_takeoff) runway.release(req) print(id, " airborne at: ", end="") announceTime(env.now) # Starting to add plots # Firstly, we need an array with results of arriving planes def noInterArrivalTimes(T_interarrival): amountOfArrivals = [0, 0, 0, 0, 0] for arrivaltime in scheduled_interarrivals: if arrivaltime == 60: amountOfArrivals[0] += 1 elif 60 < arrivaltime < 120: amountOfArrivals[1] += 1 elif 120 < arrivaltime < 180: amountOfArrivals[2] += 1 elif 180 < arrivaltime < 240: amountOfArrivals[3] += 1 elif arrivaltime > 240: amountOfArrivals[4] += 1 timeIntervals = ("60s", "60-120s", "120-180s","180-240", "> 240s") plt.bar(timeIntervals, amountOfArrivals, align="center") plt.xlabel("Interarrival time intervals") plt.ylabel("Amount of planes") plt.title("Amount of planes landing and within which interarrival interval") plt.show() def pltLandingPlanes(): results = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] for x in actual_interarrivals: results[x] += 1 plt.bar([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], results, width=0.9, align="center") plt.xlabel("Time in hours") plt.ylabel("Planes") plt.title("Amount of planes landing each hour") plt.show() env.process(plane_generator(env, 60)) env.process(badWeather(env, 1)) env.run(3600*24*7) noInterArrivalTimes(scheduled_interarrivals) pltLandingPlanes()
[ "noreply@github.com" ]
noreply@github.com
a2d5408b706e5474559c179247f09e0d735eb299
9c1d96b350b9d372a801962e99d7b86f36693ca5
/Project Euler ~ Python/Euler015.py
7c3e21f188d0bf5d7f68b3ca98e2d8e1b59ee980
[]
no_license
CruzJeff/ProjectEuler
a4a7b6f5d2fa659c6153ee3a8a512791e3598d25
cd4962847c8b68e0a789a264b781070b14153a78
refs/heads/master
2021-05-08T08:41:37.167457
2019-01-22T05:51:53
2019-01-22T05:51:53
107,064,737
0
0
null
null
null
null
UTF-8
Python
false
false
774
py
# -*- coding: utf-8 -*- """ Created on Fri Oct 20 01:15:08 2017 @author: User """ '''For number 15, for the 2 x 2 grid we have to go down 2 times, and right 2 times to get to the opposite corner. From this we can see that for an N by N grid, you would have to go down N times, and to the right N times. So for a 20x20 grid, the question is from 40 moves, how many different ways can we choose to go down/right 20 times.''' '''In statistics this is a combination, 40 choose 20 (Because if we choose 20 positions for Down/Right, the empty spots automatically become the other direction) ''' import math def ncr(n,k): numerator = math.factorial(n) denominator = math.factorial(k) * math.factorial(n-k) return numerator/denominator print(ncr(40,20))
[ "shadowburai@yahoo.com" ]
shadowburai@yahoo.com
e13f4165f0a7f3a7ad7b1cd021765f7c007adcc7
1973805c584194e97d5a5c152778d7b3bffa5f21
/Entertainment_Center.py
e5221c179b361643794cf47b0f93c6ba48c3cc29
[]
no_license
cgscreamer/FreshlyTomatoes
d09769b89f941b42b520f448bd6b188deab523e9
a2479c791d0dd5445eef5d3350fada2dd0d2cebc
refs/heads/master
2020-03-23T14:18:56.333355
2018-07-20T09:07:54
2018-07-20T09:07:54
141,668,207
0
0
null
null
null
null
UTF-8
Python
false
false
2,064
py
import media import fresh_tomatoes toy_story = media.Movie("Toy Story", "A story of a boy and his toys that come to life", "https://images-na.ssl-images-amazon.com/images/I/91q0UP6%2BUTL._SY606_.jpg", "https://www.youtube.com/watch?v=KYz2wyBy3kc" "John Lasseter") # print (toy_story.storyline) avatar = media.Movie("Avatar", "A marine on an alien planet", "https://images-na.ssl-images-amazon.com/images/I/61OUGpUfAyL._SY679_.jpg", "https://www.youtube.com/watch?v=5PSNL1qE6VY" "James Cameron") # avatar.show_trailer() school_of_rock = media.Movie("School of Rock", "Using rock music to learn", "http://img.moviepostershop.com/the-school-of-rock-movie-poster-2003-1020191888.jpg", "https://www.youtube.com/watch?v=oP7kExN8LFA" "Richard Linklater") le_scaphandre_et_le_papillon = media.Movie("Le Scaphandre et Le Papillon", "A man relives his final moments and gains an appreciation for life", "https://m.media-amazon.com/images/M/MV5BMTc3MjkzMDkxN15BMl5BanBnXkFtZTcwODAyMTU1MQ@@._V1_UY268_CR0,0,182,268_AL_.jpg", "https://www.youtube.com/watch?v=CecAbmELolY" "Julian Schnabbel") Avengers = media.Movie("Avengers Assemble", "Heroes unite to save the world", "https://m.media-amazon.com/images/M/MV5BNDYxNjQyMjAtNTdiOS00NGYwLWFmNTAtNThmYjU5ZGI2YTI1XkEyXkFqcGdeQXVyMTMxODk2OTU@._V1_UX182_CR0,0,182,268_AL_.jpg", "https://www.youtube.com/watch?v=eOrNdBpGMv8" "Joss Whedon") movies = [toy_story, avatar, school_of_rock, le_scaphandre_et_le_papillon, Avengers] fresh_tomatoes.open_movies_page(movies)
[ "31764109+cgscreamer@users.noreply.github.com" ]
31764109+cgscreamer@users.noreply.github.com
22e4fd2f20807aa7d66e4e2d2f5ec4b4c33fde8a
32b1e7ad31be02fd70ea8aec551a60933e5d4946
/ICP_6/SVM_RBF.py
90c24ed4756e5e5fb781c6242a3ef902f90ea0ae
[]
no_license
maturivinay/python_lee
4a026b99ac5f151cfc5552fc9e62e3cbfbcad736
2cb223fe331616aba66f7a78acc6c4fe31389746
refs/heads/master
2021-07-22T04:05:39.246068
2018-12-11T00:31:05
2018-12-11T00:31:05
146,038,727
0
0
null
null
null
null
UTF-8
Python
false
false
740
py
from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.datasets import load_iris from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import train_test_split from matplotlib import pyplot as plot from sklearn import metrics iris = load_iris() X = iris.data Y = iris.target X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.80, random_state=0) for Model in [GaussianNB, SVC,LinearSVC]: clf = Model().fit(X_train, Y_train) y_pred = clf.predict(X_test) print('%s: %s' % (Model.__name__, metrics.f1_score(Y_test, y_pred, average="macro")))
[ "31376403+maturivinay@users.noreply.github.com" ]
31376403+maturivinay@users.noreply.github.com
3b31d861b09cc17c98b649bb782694b94f71ca7c
93c52ebe87f878e7fdfdf7132d8b1e0351985a73
/predix/security/acs.py
5ed5032f9bbd10dc54b9654c5ae4e91c37abfbc0
[ "MIT", "LicenseRef-scancode-warranty-disclaimer" ]
permissive
8Mobius8/predixpy
3811ee2884ef771452c8f3e666a7f954327e4129
26c712439d57c6a292f3c06c6977132ac7ec1974
refs/heads/master
2020-12-02T18:15:04.684916
2017-07-19T16:40:14
2017-07-19T16:40:14
96,503,734
0
0
null
2017-07-07T05:52:52
2017-07-07T05:52:52
null
UTF-8
Python
false
false
12,354
py
import os import uuid import urllib import logging import predix.config import predix.service class AccessControl(object): """ Use the Access Control service to provide a more powerful authorization framework than basic User Account and Authorization (UAA) service. Access Control service provides app-specific policies without adding overhead to a UAA server that may become the entry point for several apps over time. """ def __init__(self): key = predix.config.get_env_key(self, 'zone_id') self.zone_id = os.environ.get(key) if not self.zone_id: raise ValueError("%s environment unset" % key) key = predix.config.get_env_key(self, 'uri') self.uri = os.environ.get(key) if not self.uri: raise ValueError("%s environment unset" % key) self.service = predix.service.Service(self.zone_id) def authenticate_as_client(self, client_id, client_secret): """ Will authenticate for the given client / secret. """ self.service.uaa.authenticate(client_id, client_secret) def _get_resource_uri(self, guid=None): """ Returns the full path that uniquely identifies the resource endpoint. """ uri = self.uri + '/v1/resource' if guid: uri += '/' + urllib.quote_plus(guid) return uri def get_resources(self): """ Return all of the resources in the ACS service. """ uri = self._get_resource_uri() return self.service._get(uri) def get_resource(self, resource_id): """ Returns a specific resource by resource id. """ # resource_id could be a path such as '/asset/123' so quote uri = self._get_resource_uri(guid=resource_id) return self.service._get(uri) def _post_resource(self, body): """ Create new resources and associated attributes. Example: acs.post_resource([ { "resourceIdentifier": "masaya", "parents": [], "attributes": [ { "issuer": "default", "name": "country", "value": "Nicaragua" } ], } ]) The issuer is effectively a namespace, and in policy evaluations you identify an attribute by a specific namespace. Many examples provide a URL but it could be any arbitrary string. 
The body is a list, so many resources can be added at the same time. """ assert isinstance(body, (list)), "POST for requires body to be a list" uri = self._get_resource_uri() return self.service._post(uri, body) def delete_resource(self, resource_id): """ Remove a specific resource by its identifier. """ # resource_id could be a path such as '/asset/123' so quote uri = self._get_resource_uri(guid=resource_id) return self.service._delete(uri) def _put_resource(self, resource_id, body): """ Update a resource for the given resource id. The body is not a list but a dictionary of a single resource. """ assert isinstance(body, (dict)), "PUT requires body to be a dict." # resource_id could be a path such as '/asset/123' so quote uri = self._get_resource_uri(guid=resource_id) return self.service._put(uri, body) def add_resource(self, resource_id, attributes, parents=[], issuer='default'): """ Will add the given resource with a given identifier and attribute dictionary. example/ add_resource('/asset/12', {'id': 12, 'manufacturer': 'GE'}) """ # MAINT: consider test to avoid adding duplicate resource id assert isinstance(attributes, (dict)), "attributes expected to be dict" attrs = [] for key in attributes.keys(): attrs.append({ 'issuer': issuer, 'name': key, 'value': attributes[key] }) body = { "resourceIdentifier": resource_id, "parents": parents, "attributes": attrs, } return self._put_resource(resource_id, body) def _get_subject_uri(self, guid=None): """ Returns the full path that uniquely identifies the subject endpoint. """ uri = self.uri + '/v1/subject' if guid: uri += '/' + urllib.quote_plus(guid) return uri def get_subjects(self): """ Return all of the subjects in the ACS service. """ uri = self._get_subject_uri() return self.service._get(uri) def get_subject(self, subject_id): """ Returns a specific subject by subject id. 
""" # subject_id could be a path such as '/user/j12y' so quote uri = self._get_subject_uri(guid=subject_id) return self.service._get(uri) def _post_subject(self, body): """ Create new subjects and associated attributes. Example: acs.post_subject([ { "subjectIdentifier": "/role/evangelist", "parents": [], "attributes": [ { "issuer": "default", "name": "role", "value": "developer evangelist", } ] } ]) The issuer is effectively a namespace, and in policy evaluations you identify an attribute by a specific namespace. Many examples provide a URL but it could be any arbitrary string. The body is a list, so many subjects can be added at the same time. """ assert isinstance(body, (list)), "POST requires body to be a list" uri = self._get_subject_uri() return self.service._post(uri, body) def delete_subject(self, subject_id): """ Remove a specific subject by its identifier. """ # subject_id could be a path such as '/role/analyst' so quote uri = self._get_subject_uri(guid=subject_id) return self.service._delete(uri) def _put_subject(self, subject_id, body): """ Update a subject for the given subject id. The body is not a list but a dictionary of a single resource. """ assert isinstance(body, (dict)), "PUT requires body to be dict." # subject_id could be a path such as '/asset/123' so quote uri = self._get_subject_uri(guid=subject_id) return self.service._put(uri, body) def add_subject(self, subject_id, attributes, parents=[], issuer='default'): """ Will add the given subject with a given identifier and attribute dictionary. 
example/ add_subject('/user/j12y', {'username': 'j12y'}) """ # MAINT: consider test to avoid adding duplicate subject id assert isinstance(attributes, (dict)), "attributes expected to be dict" attrs = [] for key in attributes.keys(): attrs.append({ 'issuer': issuer, 'name': key, 'value': attributes[key] }) body = { "subjectIdentifier": subject_id, "parents": parents, "attributes": attrs, } return self._put_subject(subject_id, body) def _get_monitoring_heartbeat(self): """ Tests whether or not the ACS service being monitored is alive. """ target = self.uri + '/monitoring/heartbeat' response = self.session.get(target) return response def is_alive(self): """ Will test whether the ACS service is up and alive. """ response = self.get_monitoring_heartbeat() if response.status_code == 200 and response.content == 'alive': return True return False def _get_policy_set_uri(self, guid=None): """ Returns the full path that uniquely identifies the subject endpoint. """ uri = self.uri + '/v1/policy-set' if guid: uri += '/' + urllib.quote_plus(guid) return uri def get_policy_sets(self): """ Return all of the policy sets in the ACS service. """ uri = self._get_policy_set_uri() return self.service._get(uri) def _put_policy_set(self, policy_set_id, body): """ Will create or update a policy set for the given path. """ assert isinstance(body, (dict)), "PUT requires body to be a dict." uri = self._get_policy_set_uri(guid=policy_set_id) return self.service._put(uri, body) def _get_policy_set(self, policy_set_id): """ Get a specific policy set by id. """ uri = self._get_policy_set_uri(guid=policy_set_id) return self.service._get(uri) def delete_policy_set(self, policy_set_id): """ Delete a specific policy set by id. Method is idempotent. 
""" uri = self._get_policy_set_uri(guid=policy_set_id) return self.service._delete(uri) def add_policy(self, name, action, resource, subject, condition, policy_set_id=None, effect='PERMIT'): """ Will create a new policy set to enforce the given policy details. The name is just a helpful descriptor for the policy. The action maps to a HTTP verb. Policies are evaluated against resources and subjects. They are identified by matching a uriTemplate or attributes. Examples: resource = { "uriTemplate": "/asset/{id}" } subject: { "attributes": [{ "issuer": "default", "name": "role" }] } The condition is expected to be a string that defines a groovy operation that can be evaluated. Examples: condition = "match.single(subject.attributes('default', 'role'), 'admin') """ # If not given a policy set id will generate one if not policy_set_id: policy_set_id = str(uuid.uuid4()) # Only a few operations / actions are supported in policy definitions if action not in ['GET', 'PUT', 'POST', 'DELETE']: raise ValueError("Invalid action") # Defines a single policy to be part of the policy set. policy = { "name": name, "target": { "resource": resource, "subject": subject, "action": action, }, "conditions": [{ "name": "", "condition": condition, }], "effect": effect, } # Body of the request is a list of policies body = { "name": policy_set_id, "policies": [policy], } result = self._put_policy_set(policy_set_id, body) return result def is_allowed(self, subject_id, action, resource_id, policy_sets=[]): """ Evaluate a policy-set against a subject and resource. 
example/ is_allowed('/user/j12y', 'GET', '/asset/12') """ body = { "action": action, "subjectIdentifier": subject_id, "resourceIdentifier": resource_id, } if policy_sets: body['policySetsEvaluationOrder'] = policy_sets # Will return a 200 with decision uri = self.uri + '/v1/policy-evaluation' logging.debug("URI=" + str(uri)) logging.debug("BODY=" + str(body)) response = self.service._post(uri, body) if 'effect' in response: if response['effect'] in ['NOT_APPLICABLE', 'PERMIT']: return True return False
[ "jayson.delancey@gmail.com" ]
jayson.delancey@gmail.com
c59ac3a4809a364611d8766dc31cc489c9434b3a
d261596c67a3ce9bae2373a321015fe62b088c5b
/BVE/BVEfd.py
7af140139d6c757fed10a9d8d74a85ae08734606
[]
no_license
mintDan/FluidDynamics
ada943d990a0301cce4d945a114b86abba9a0d4f
5fc0a7be3eb1c4551c2918b2c3d3bbea643afc56
refs/heads/master
2021-07-21T04:07:02.680043
2021-02-13T08:54:23
2021-02-13T08:54:23
67,929,444
0
0
null
null
null
null
UTF-8
Python
false
false
21,023
py
""" Dan Krog Numerical Solution of Barotropic Vorticity Equation vorticity, Holton page 101, Absolute vorticity eta, eta = zeta + f f is coriolis parameter, zeta is relative vorticity, zeta = nabla x (u,v)' dzeta/dt + J(psi,zeta+f) = 0 where J is Jacobian operator Barotropic fluid, pressure depends only only density. Hence, we can take the shallow water equations, and make the height constant, h and z are constant, then, vertical velocity w = 0, and the fluid is horizontally nondivergent, du/dx + dv/dy = 0 Boundary conditions, for a periodic in x, wall in ybottom and ytop psi = 0 nablapsi.n = 0 (no-slip on side walls) https://pdfs.semanticscholar.org/c937/d58642e376a098b2b319783cd121c7fbbfe9.pdf dnabla^2psi/dt = -J(psi,nabla^2psi + f)+F+D(psi) F is forcing, D is either diffusion or bottom-friction put psi = 0 on the boundary to get dpsi/ds = 0 on the boundary, this is non-inflow boundary condition for a bounded basin also, non-slip condition dpsi/dn = 0 http://empslocal.ex.ac.uk/people/staff/dbs202/cat/courses/MTMW14/notes2005.pdf Free-slip conditions on north and south wall, v = 0 on boundaries, no flow through the boundaries also, du/dy = 0 This leads to constant stream function (can just set it to 0) and also zero relative vorticity on the boundaries DIfferent methods to solve the Poisson equation Should add some terms to the BVE, like vorticity drag, forcing, viscosity Sources: Dale R Durran book Holton book lec12 pdf ==================================================== Change functions to return Perhaps instead of setting psi = 0, see if it changes if it set psi = psi_old on northern and southern boundary for Poisson solver. and hold psi_old constant throughout pseudo-time iteration on nothern and southern boundaries lige nu holder jeg zeta = 0 på boundaries, maybe i hvert fald gøre den periodic etc? maybe er min velocity fra stream function psi derivatives også forkerte lige nu er beta constant, bør den vidst ikke være, right? #Mangler denne her ikke noget? 
Måske?Passer det her med units? Ikke særlig godt tror jeg? psi+=U0*(Ly/2*1000-y) jo det passer vel med units faktisk, men, hvorfor er den der? I may have translated matlab code wrong, forgot python numpy slicing doesn't include last point, so x[1:nx-1] e.g Add in Arawakian grid? """ import numpy as np #from pylab import * import matplotlib.pyplot as plt import matplotlib.animation as animation from mpl_toolkits.mplot3d import axes3d #0 = zeta0 #1 -> zeta0 #2 -> 0, 0 = zeta0, 1 = zeta #3 -> 1, 1 = zeta0, 2 = zeta #4 -> 2, 2 = zeta0, 3 = zeta #5 -> 3, 3 = zeta0, 4 = zeta def SolvePoisson(psi,zeta): """ Solve Poisson Equation Holton page 465 nabla^2psi = zeta where we are solving for psi the streamfunction, and zeta is the relative vorticity We set BCs at y = top and y = bottom, but not x, i think Following lec12.pdf here From lec12.pdf, Solving poisson equation means putting some boundary condition on psi, right? Because otherwise you could add any constant to psi, and it would still be able to give some curvature nabla^2psi = f... so it makes sense, to give some unique solution, we impose boundary conditions, but i think, for the sake of calculating velocity, it also wouldn't matter if we added some constant to psi right now i'm doing psi[1:ny-1,1:nx-1] #So that means i'm not setting psi[bottom,x] but I AM setting psi[top,x] #So i think it should be psi[1:ny-2,1:nx-1] #Or maybe, x should also be changed... 
But, remember in Python, 1:ny-1, then 1 is included, but ny-1 is excluded, so in effect, we're doing 1:ny-2 psi is streamfunction zeta is vorticity """ #Compute streamfunction from vorticity #ζ_(i,j)^1=(((ψ_(i+1,j)-2ψ_(i,j)+ψ_(i-1,j) ))/dx^2 +((ψ_(i,j+1)-2ψ_(i,j)+ψ_(i,j-1) ))/dy^2 ) #solve psi from lec12.pdf psin = np.zeros((ny,nx),dtype=np.float64) psin = psi.copy() dtau = 0.5*0.5*(0.5*dx**2+0.5*dy**2) for r in range(500): #pseudo-time psin = psi.copy() #Interior points psi[1:ny-1,1:nx-1] = psin[1:ny-1,1:nx-1]+dtau*( +(psin[1:ny-1,2:nx]-2*psin[1:ny-1,1:nx-1]+psin[1:ny-1,0:nx-2])/dx**2 +(psin[2:ny,1:nx-1]-2*psin[1:ny-1,1:nx-1]+psin[0:ny-2,1:nx-1])/dy**2 -zeta[1:ny-1,1:nx-1]) #x = 0 boundary #Jeg tror, vi skal tage et note ud af Holtons bog, og her, skipper vi psin[1:ny-1,-1] og bruger psin[1:ny-1,-2] psi[1:ny-1,0] = psin[1:ny-1,0]+dtau*( +(psin[1:ny-1,1]-2*psin[1:ny-1,0]+psin[1:ny-1,-2])/dx**2 +(psin[2:ny,0]-2*psin[1:ny-1,0]+psin[0:ny-2,0])/dy**2 -zeta[1:ny-1,0]) #Old #psi[1:ny-1,0] = psin[1:ny-1,0]+dtau*( # +(psin[1:ny-1,1]-2*psin[1:ny-1,0]+psin[1:ny-1,-1])/dx**2 # +(psin[2:ny,0]-2*psin[1:ny-1,0]+psin[0:ny-2,0])/dy**2 # -zeta[1:ny-1,0]) #x = L boundary #Enten så gør den periodic sådan her #psi[1:ny-1,-1] = psi[1:ny-1,0] #eller sådan her, Holton psi[:,-1] = psi[:,0] #Eller gør den periodic sådan her #psi[1:ny-1,-1] = psin[1:ny-1,-1]+dtau*( #+(psin[1:ny-1,0]-2*psin[1:ny-1,-1]+psin[1:ny-1,-2])/dx**2 #+(psin[2:ny,-1]-2*psin[1:ny-1,-1]+psin[0:ny-2,-1])/dy**2 #-zeta[1:ny-1,-1]) #boundary at x = L, i'm gonna set equal to x = 0... #Boundary for psi, maybe i should remove these! #psi[:,-1] = 0 #right boundary #psi[:,0] = 0 #left boundary #What if i don't set it to 0? afaik it should be constant along the edges, but maybe not 0... #Det har i hvert fald bestemt en effekt om man sætter det til 0 eller ej.. #psi[0,:] = 0 #psi[-1,:] = 0 #I think here, we should return psi, instead of doing it implicitly.... #so, change the way the method works... 
#return psi def FirstStepZeta(zeta,zetan,u,v,beta): """ Euler-forward difference for first step. Holton pg 467 Denne her kan jeg indføre nogle [:] til, i stedet for[1:ny-1] fx.... tror godt vi kan gå HELT UD til boundaries Men lad os starte et sted dog When we calculate zetan for x = 0, we use x -> -2 instead of x -> -1 """ #Interior points zetan[1:ny-1,1:nx-1] = zeta[1:ny-1,1:nx-1]-dt*(\ (u[1:ny-1,2:nx]*zeta[1:ny-1,2:nx]-u[1:ny-1,0:nx-2]*zeta[1:ny-1,0:nx-2])/(2*dx)\ +(v[2:ny,1:nx-1]*zeta[2:ny,1:nx-1]-v[0:ny-2,1:nx-1]*zeta[0:ny-2,1:nx-1])/(2*dy)\ +beta*v[1:ny-1,1:nx-1]) #x = 0 zetan[1:ny-1,0] = zeta[1:ny-1,0]-dt*(\ (u[1:ny-1,1]*zeta[1:ny-1,1]-u[1:ny-1,-2]*zeta[1:ny-1,-2])/(2*dx)\ +(v[2:ny,0]*zeta[2:ny,0]-v[0:ny-2,0]*zeta[0:ny-2,0])/(2*dy)\ +beta*v[1:ny-1,0]) #x = L borders #zetan[1:ny-1,-1] = zeta[1:ny-1,-1]-dt*(\ # (u[1:ny-1,0]*zeta[1:ny-1,0]-u[1:ny-1,-2]*zeta[1:ny-1,-2])/(2*dx)\ # +(v[2:ny,-1]*zeta[2:ny,-1]-v[0:ny-2,-1]*zeta[0:ny-2,-1])/(2*dy)\ # +beta*v[1:ny-1,-1]) zetan[:,-1] = zetan[:,0] #zetan[1:ny-1,1] = zeta[1:ny-1,0]-dt*(\ # (u[1:ny-1,1]*zeta[1:ny-1,1]-u[1:ny-1,-1]*zeta[1:ny-1,-1])/(2*dx)\ # +(v[2:ny,0]*zeta[2:ny,0]-v[0:ny-2,0]*zeta[0:ny-2,0])/(2*dy)\ # +beta*v[1:ny-1,0]) #zeta = zetan def UpdateZetaLeapFrog(zetan,zeta0,zeta,u,v,beta): """ Denne her kan jeg indføre nogle [:] til, i stedet for[1:ny-1] fx.... tror godt vi kan gå HELT UD til boundaries Men lad os starte et sted dog Hmm... 
jeg skal faktisk også have old velocities?:O """ zetan[1:ny-1,1:nx-1] = zeta0[1:ny-1,1:nx-1]-2*dt*(\ (u[1:ny-1,2:nx]*zeta0[1:ny-1,2:nx]-u[1:ny-1,0:nx-2]*zeta0[1:ny-1,0:nx-2])/(2*dx)\ +(v[2:ny,1:nx-1]*zeta0[2:ny,1:nx-1]-v[0:ny-2,1:nx-1]*zeta0[0:ny-2,1:nx-1])/(2*dy)\ +beta*v[1:ny-1,1:nx-1]) #x = 0 zetan[1:ny-1,0] = zeta0[1:ny-1,0]-2*dt*(\ (u[1:ny-1,1]*zeta0[1:ny-1,1]-u[1:ny-1,-1]*zeta0[1:ny-1,-1])/(2*dx)\ +(v[2:ny,0]*zeta0[2:ny,0]-v[0:ny-2,0]*zeta0[0:ny-2,0])/(2*dy)\ +beta*v[1:ny-1,0]) #x = L borders zetan[1:ny-1,-1] = zeta0[1:ny-1,-1]-2*dt*(\ (u[1:ny-1,0]*zeta0[1:ny-1,0]-u[1:ny-1,-2]*zeta0[1:ny-1,-2])/(2*dx)\ +(v[2:ny,-1]*zeta0[2:ny,-1]-v[0:ny-2,-1]*zeta0[0:ny-2,-1])/(2*dy)\ +beta*v[1:ny-1,-1]) #Holton does this #zetan[:,nx-1] = zetan[:,0] #meget unstable #y = 0 zetan[0,1:nx-1] = zeta0[0,1:nx-1]-2*dt*(\ (u[0,2:nx]*zeta0[0,2:nx]-u[0,0:nx-2]*zeta0[0,0:nx-2])/(2*dx)\ +(v[1,1:nx-1]*zeta0[1,1:nx-1]-v[-1,1:nx-1]*zeta0[-1,1:nx-1])/(2*dy)\ +beta*v[0,1:nx-1]) #y = L borders zetan[-1,1:nx-1] = zeta0[-1,1:nx-1]-2*dt*(\ (u[-1,2:nx]*zeta0[-1,2:nx]-u[-1,0:nx-2]*zeta0[-1,0:nx-2])/(2*dx)\ +(v[0,1:nx-1]*zeta0[0,1:nx-1]-v[-2,1:nx-1]*zeta0[-2,1:nx-1])/(2*dy)\ +beta*v[-1,1:nx-1]) #zeta0 = zetan def UpdateZetaLeapFrogHolton(zetan,zeta0,zeta,u,v,beta,numdif): """ """ #zetan[1:ny-2,0:nx-2] = (zeta0[1:ny-2,0:nx-2] -beta*2*dt*v[1:ny-2,0:nx-2] # -2*dt*(dflx[1:ny-2,0:nx-2]+dfly[1:ny-2,0:nx-2]) # -2*dt*numdif[1:ny-2,0:nx-2]) zetan[1:ny-1,0:nx-1] = (zeta0[1:ny-1,0:nx-1] -beta*2*dt*v[1:ny-1,0:nx-1] -2*dt*(dflx[1:ny-1,0:nx-1]+dfly[1:ny-1,0:nx-1]) -2*dt*numdif[1:ny-1,0:nx-1]) zetan[:,nx-1]=zetan[:,0] def divflux(P,u,v,dx,dy): dflx = np.zeros((ny,nx)) dfly = np.zeros((ny,nx)) #FØRST dfly #han har sat 0 foran y divflux, så vi har ingen y directioon divflux langs I bottom og top #dfly[0,:] = 0*(P[1,:]*v[1,:] - P[0,:]*v[0,:])/dy; #dfly[ny-1,:] = 0*(P[ny-1,:]*v[ny-1,:] - P[ny-2,:]*v[ny-2,:])/dy; #I center: #dfly[1:ny-2,:] = (P[2:ny-1,:]*v[2:ny-1,:]-P[0:ny-3,:]*v[0:ny-3,:])/(2*dy); #NU TAGER VI dflx #% 
Take cyclic differences on left and right boundaries #dflx[:,0]=(P[:,1]*u[:,1]-P[:,nx-2]*u[:,nx-2])/(2*dx); #dflx[:,nx-1]= dflx[:,0]; #% take centered differences on interior points #dflx[:,1:nx-2]= (P[:,2:nx-1]*u[:,2:nx-1]-P[:,0:nx-3]*u[:,0:nx-3])/(2*dx); #FØRST dfly #han har sat 0 foran y divflux, så vi har ingen y directioon divflux langs I bottom og top dfly[0,:] = 0*(P[1,:]*v[1,:] - P[0,:]*v[0,:])/dy; dfly[ny-1,:] = 0*(P[ny-1,:]*v[ny-1,:] - P[ny-2,:]*v[ny-2,:])/dy; #I center: dfly[1:ny-1,:] = (P[2:ny,:]*v[2:ny,:]-P[0:ny-2,:]*v[0:ny-2,:])/(2*dy); #NU TAGER VI dflx #% Take cyclic differences on left and right boundaries dflx[:,0]=(P[:,1]*u[:,1]-P[:,nx-2]*u[:,nx-2])/(2*dx); dflx[:,nx-1]= dflx[:,0]; #% take centered differences on interior points dflx[:,1:nx-1]= (P[:,2:nx]*u[:,2:nx]-P[:,0:nx-2]*u[:,0:nx-2])/(2*dx); return dflx,dfly #Interessant det der sker ved boundary periodic… vi springer en gridpoint over I centered differences, look it… def Damping4(Dk4,nx,ny,U): """ Where does the damping come from? 
I don't remember now """ numdif = np.zeros((ny,nx)); #Do smoothing in y space for 1st derivative zero at boundaries numdif[3:ny-4,:] = Dk4*(U[5:ny-2,:] -4*U[4:ny-3,:]+6*U[3:ny-4,:] -4*U[2:ny-5,:]+U[1:ny-6,:]) numdif[2,:] = Dk4*(-3*U[1,:] +6*U[2,:]-4*U[3,:]+U[4,:]) numdif[1,:] = Dk4*(2*U[1,:] -3*U[2,:] +U[3,:]) numdif[ny-3,:] = Dk4*(-3*U[ny-2,:]+6*U[ny-3,:]-4*U[ny-3,:]+U[ny-4,:]) numdif[ny-2,:] = Dk4*(2*U[ny-2,:] -3*U[ny-3,:] + U[ny-4,:]) #%do smoothing in x space with periodicity numdif[:,2:nx-3] = numdif[:,2:nx-3]+Dk4*(U[:,4:nx-1] -4*U[:,3:nx-2]+6*U[:,2:nx-3] -4*U[:,1:nx-4]+U[:,0:nx-5]) numdif[:,1] = numdif[:,1]+Dk4*(U[:,3] -4*U[:,2]+6*U[:,1] -4*U[:,0]+U[:,nx-1]) numdif[:,0] = numdif[:,0]+Dk4*(U[:,2] -4*U[:,1]+6*U[:,0] -4*U[:,nx-1]+U[:,nx-2]) numdif[:,nx-1] = numdif[:,0] numdif[:,nx-2] = numdif[:,nx-2]+Dk4*(U[:,1] -4*U[:,0]+6*U[:,nx-2] -4*U[:,nx-3]+U[:,nx-4]) return numdif def CalcVelocity(u,v,dxpsi,dypsi,psi,dx,dy): """ Calculate velocity (u,v) from stream function psi u = -dpsi/dy v = dpsi/dx Use finite difference More closely following Holton, he does something different on last x coordinate, dxpsi[:,-1] Also note, that we do dxpsi[:,0]=(psi[:,1]-psi[:,-2])/(2*dx) and NOT dxpsi[:,0]=(psi[:,1]-psi[:,-1])/(2*dx) so, in the last psi, we jump to SECOND LAST coordinate, instead of last coordinate. so we skip the last column x = L, when calculating for x = 0 """ #Calculate gradients for air velocity #forward on bottom? Backward on top? 
#dypsi[0,:] = (psi[1,:] - psi[0,:])/dy #dypsi[-1,:] = (psi[-1,:] - psi[-2,:])/dy #dypsi[1:-2,:] = (psi[2:-1,:]-psi[0:-3,:])/(2*dy) #Centered difference #x = 0, x = L # dxpsi[:,0]=(psi[:,1]-psi[:,-2])/(2*dx) #dxpsi[:,-1]= dxpsi[:,0]#(psi[:,0]-psi[:,-2])/(2*dx) # #Interior #dxpsi[:,1:-2]= (psi[:,2:-1]-psi[:,0:-3])/(2*dx) #centered difference #y = 0 dypsi[0,:] = (psi[1,:] - psi[0,:])/dy #y = L dypsi[-1,:] = (psi[-1,:] - psi[-2,:])/dy #Interior dypsi[1:-1,:] = (psi[2:,:]-psi[0:-2,:])/(2*dy) #Centered difference #x = 0, x = L dxpsi[:,0]=(psi[:,1]-psi[:,-2])/(2*dx) dxpsi[:,-1]= dxpsi[:,0]#(psi[:,0]-psi[:,-2])/(2*dx) #Interior dxpsi[:,1:-1]= (psi[:,2:]-psi[:,0:-2])/(2*dx) #centered difference u = -dypsi v = dxpsi return u,v def KineticEnergy(u,v): """ Calculate Domain integrated Kinetic Energy Should be conserved. Can also be done as |nablapsi|^2 """ KE = np.sum(u**2+v**2) return KE def Enstrophy(zeta): """ Calculate Domain integrated Enstrophy Domain Integrated Enstrophy should be conserved. calculated as, |zeta+f|^2 """ Ens = np.sum(zeta*zeta) return Ens def animate(i): global zetan,u,v,zeta,psi,psin,dxpsi,dypsi,zeta0,dx,dy,t,dt #leapfrog #zetan[1:ny-1,1:nx-1] = zeta[1:ny-1,1:nx-1]-2*dt*(\ # (u[1:ny-1,2:nx]*zeta[1:ny-1,2:nx]-u[1:ny-1,0:nx-2]*zeta[1:ny-1,0:nx-2])/(2*dx)\ # +(v[2:ny,1:nx-1]*zeta[2:ny,1:nx-1]-v[0:ny-2,1:nx-1]*zeta[0:ny-2,1:nx-1])/(2*dy)\ # +beta*v[1:ny-1,1:nx-1]) #zeta = zetan t+=dt zeta0 = zeta.copy() zeta = zetan.copy() numdif = Damping4(Av4,nx,ny,zeta0) #UpdateZetaLeapFrog(zetan,zeta0,zeta,u,v,beta) UpdateZetaLeapFrogHolton(zetan,zeta0,zeta,u,v,beta,numdif) zeta0 = zeta.copy() zeta = zetan.copy() SolvePoisson(psi,zeta) # #Calculate psi streamfunction # #solve psi from lec12.pdf # psin = np.zeros((ny,nx),dtype=float64) # psin = psi.copy() # dtau = 0.5*0.5*(0.5*dx**2+0.5*dy**2) # for r in range(500): #pseudo-time # psin = psi.copy() # psi[1:ny-1,1:nx-1] = psin[1:ny-1,1:nx-1]+dtau*(\ # (psin[1:ny-1,2:nx]-2*psin[1:ny-1,1:nx-1]+psin[1:ny-1,0:nx-2])/dx**2\ # 
+(psin[2:ny,1:nx-1]-2*psin[1:ny-1,1:nx-1]+psin[0:ny-2,1:nx-1])/dy**2\ # +zeta[1:ny-1,1:nx-1]) # #psi[:,-1] = 0 #right boundary # #psi[:,0] = 0 #left boundary # psi[-1,:] = 0 # psi[0,:] = 0 psi+=U0*(Ly/2*1000-y) # #Calculate gradients for air velocity # #forward on bottom? Backward on top? #dypsi[0,:] = (psi[1,:] - psi[0,:])/dy #dypsi[-1,:] = (psi[-1,:] - psi[-2,:])/dy #dypsi[1:-2,:] = (psi[2:-1,:]-psi[0:-3,:])/(2*dy) #Centered difference #dxpsi[:,0]=(psi[:,1]-psi[:,-1])/(2*dx) #dxpsi[:,-1]= dxpsi[:,0]#(psi[:,0]-psi[:,-2])/(2*dx) #dxpsi[:,1:-2]= (psi[:,2:-1]-psi[:,0:-3])/(2*dx) #centered difference #u = -dypsi #v = dxpsi u,v = CalcVelocity(u,v,dxpsi,dypsi,psi,dx,dy) dflx,dfly = divflux(zeta,u,v,dx,dy) KE = KineticEnergy(u,v) Ens = Enstrophy(zeta) print("KE = {}".format(KE)) print("En = {}".format(Ens)) ax.clear() #Hvorfor skal jeg bruge denne her? Den bruger de andre animations ikke C = ax.contour(x/1000,y/1000,zeta*10**7,8,colors='black') #C = ax.contour(x/1000,y/1000,psi/100000,8,colors='black') ax.quiver(x/1000,y/1000,u,v) ax.set_title('Barotropic Vorticity Equation t = {}'.format(t)) #ax.set_xlabel('x') #ax.set_ylabel('y') ax.set_xticks([-Lx*2/6,-Lx/6,0,Lx/6,Lx*2/6]) ax.set_yticks([-Ly*2/6,-Ly/6,0,Ly/6,Ly*2/6]) ax.set_xlabel("x/km") ax.set_ylabel("y/km") #plt.xticks([-Lx/6,Lx/6],[-Lx/6,Lx/6]) #plt.xticks([-Lx/6,Lx/6]) #ax.set_xticks([0],('hey')) plt.clabel(C,inline=1,fontsize=10,fmt="%1.1f") #print(zeta) if i == 4: fig.savefig('BVE.png', bbox_inches='tight') if __name__ == "__main__": Lx = 6000 #km Ly = 6000 #km nx = 65 ny = 65 pi = 3.141592 #x = np.linspace(0,Lx,nx) #y = np.linspace(0,Ly,ny) X = np.linspace(-Lx/2,Lx/2,nx) Y = np.linspace(-Ly/2,Ly/2,ny) x,y = np.meshgrid(X*1000,Y*1000) k = 2*pi/(Lx*1000) m = pi/(Ly*1000) dx = 1000*Lx/(nx-1) dy = 1000*Ly/(ny-1) U0 = 20 #zonal wind beta = 1.62*10**(-11) #he set 0 infront? Av4 = 10**(-6) A = 10**(-4) #initial vorticity and streamfunction #We have 3 sets of zetas here, one is initial, I would assume? 
zeta0 #zeto0 is set to a gaussian it zeta0 = np.array(A*np.exp(-2*(k**2*x**2+m**2*y**2)),dtype=np.float64) zeta = zeta0 zetan = zeta0 #time integration parameters #hours time_end = 3*3600 #second dt = 100 #seconds? psi = np.zeros((ny,nx),dtype=np.float64) dypsi = np.zeros((ny,nx),dtype=np.float64) dxpsi = np.zeros((ny,nx),dtype=np.float64) u = np.zeros((ny,nx),dtype=np.float64) v = np.zeros((ny,nx),dtype=np.float64) dfly = np.zeros((ny,nx),dtype=np.float64) dflx = np.zeros((ny,nx),dtype=np.float64) SolvePoisson(psi,zeta) #Compute streamfunction from vorticity #ζ_(i,j)^1=(((ψ_(i+1,j)-2ψ_(i,j)+ψ_(i-1,j) ))/dx^2 +((ψ_(i,j+1)-2ψ_(i,j)+ψ_(i,j-1) ))/dy^2 ) # #solve psi from lec12.pdf # psin = np.zeros((ny,nx),dtype=float64) # psin = psi.copy() # dtau = 0.5*0.5*(0.5*dx**2+0.5*dy**2) # for r in range(500): #pseudo-time # psin = psi.copy() # psi[1:ny-1,1:nx-1] = psin[1:ny-1,1:nx-1]+dtau*(\ # (psin[1:ny-1,2:nx]-2*psin[1:ny-1,1:nx-1]+psin[1:ny-1,0:nx-2])/dx**2\ # +(psin[2:ny,1:nx-1]-2*psin[1:ny-1,1:nx-1]+psin[0:ny-2,1:nx-1])/dy**2\ # +zeta[1:ny-1,1:nx-1]) # #Boundary for psi, maybe i should remove these! # #psi[:,-1] = 0 #right boundary # #psi[:,0] = 0 #left boundary # psi[-1,:] = 0 # psi[0,:] = 0 psi+=U0*(Ly/2*1000 - y) #Calculate gradients for air velocity #forward on bottom? Backward on top? 
# dypsi[0,:] = (psi[1,:] - psi[0,:])/dy # dypsi[-1,:] = (psi[-1,:] - psi[-2,:])/dy # dypsi[1:-2,:] = (psi[2:-1,:]-psi[0:-3,:])/(2*dy) #Centered difference # dxpsi[:,0]=(psi[:,1]-psi[:,-1])/(2*dx) # dxpsi[:,-1]= dxpsi[:,0]#(psi[:,0]-psi[:,-2])/(2*dx) # dxpsi[:,1:-2]= (psi[:,2:-1]-psi[:,0:-3])/(2*dx) #centered difference # u = -dypsi # v = dxpsi u,v = CalcVelocity(u,v,dxpsi,dypsi,psi,dx,dy) dflx,dfly = divflux(zeta,u,v,dx,dy) #Forward time difference FirstStepZeta(zeta0,zeta,u,v,beta) # zetan[1:ny-1,1:nx-1] = zeta[1:ny-1,1:nx-1]-dt*(\ # (u[1:ny-1,2:nx]*zeta[1:ny-1,2:nx]-u[1:ny-1,0:nx-2]*zeta[1:ny-1,0:nx-2])/(2*dx)\ # +(v[2:ny,1:nx-1]*zeta[2:ny,1:nx-1]-v[0:ny-2,1:nx-1]*zeta[0:ny-2,1:nx-1])/(2*dy)\ # +beta*v[1:ny-1,1:nx-1]) # zeta = zetan t=0 t += dt #plt.hold(True) fig = plt.figure() ax = plt.gca() #ax.contour(x,y,zeta,colors='black') #ax.quiver(x,y,u,v) ax.set_title('Barotropic Vorticity Equation') ax.set_xlabel('x') ax.set_ylabel('y') #ax.set_xticks([0]) #ax.set_yticks([0]) #ax.set_xticks([Lx/3,0],["hey","loL"]) #ax.set_yticks([Ly/3,2*Ly/3],[1,1]) #From animation function, for timestep in range(100): t+=dt zeta0 = zeta.copy() zeta = zetan.copy() #Try without the damping numdif = Damping4(Av4,nx,ny,zeta0) #UpdateZetaLeapFrog(zetan,zeta0,zeta,u,v,beta) UpdateZetaLeapFrogHolton(zetan,zeta0,zeta,u,v,beta,numdif) zeta0 = zeta.copy() zeta = zetan.copy() SolvePoisson(psi,zeta) # #Calculate psi streamfunction # #solve psi from lec12.pdf # psin = np.zeros((ny,nx),dtype=float64) # psin = psi.copy() # dtau = 0.5*0.5*(0.5*dx**2+0.5*dy**2) # for r in range(500): #pseudo-time # psin = psi.copy() # psi[1:ny-1,1:nx-1] = psin[1:ny-1,1:nx-1]+dtau*(\ # (psin[1:ny-1,2:nx]-2*psin[1:ny-1,1:nx-1]+psin[1:ny-1,0:nx-2])/dx**2\ # +(psin[2:ny,1:nx-1]-2*psin[1:ny-1,1:nx-1]+psin[0:ny-2,1:nx-1])/dy**2\ # +zeta[1:ny-1,1:nx-1]) # #psi[:,-1] = 0 #right boundary # #psi[:,0] = 0 #left boundary # psi[-1,:] = 0 # psi[0,:] = 0 psi+=U0*(Ly/2*1000-y) # #Calculate gradients for air velocity # #forward on 
bottom? Backward on top? #dypsi[0,:] = (psi[1,:] - psi[0,:])/dy #dypsi[-1,:] = (psi[-1,:] - psi[-2,:])/dy #dypsi[1:-2,:] = (psi[2:-1,:]-psi[0:-3,:])/(2*dy) #Centered difference #dxpsi[:,0]=(psi[:,1]-psi[:,-1])/(2*dx) #dxpsi[:,-1]= dxpsi[:,0]#(psi[:,0]-psi[:,-2])/(2*dx) #dxpsi[:,1:-2]= (psi[:,2:-1]-psi[:,0:-3])/(2*dx) #centered difference #u = -dypsi #v = dxpsi u,v = CalcVelocity(u,v,dxpsi,dypsi,psi,dx,dy) dflx,dfly = divflux(zeta,u,v,dx,dy) KE = KineticEnergy(u,v) Ens = Enstrophy(zeta) print("KE = {}".format(KE)) print("En = {}".format(Ens)) ax.clear() #Hvorfor skal jeg bruge denne her? Den bruger de andre animations ikke C = ax.contour(x/1000,y/1000,zeta*10**7,8,colors='black') #C = ax.contour(x/1000,y/1000,psi/100000,8,colors='black') ax.quiver(x/1000,y/1000,u,v) ax.set_title('Barotropic Vorticity Equation t = {}'.format(t)) #ax.set_xlabel('x') #ax.set_ylabel('y') ax.set_xticks([-Lx*2/6,-Lx/6,0,Lx/6,Lx*2/6]) ax.set_yticks([-Ly*2/6,-Ly/6,0,Ly/6,Ly*2/6]) ax.set_xlabel("x/km") ax.set_ylabel("y/km") #plt.xticks([-Lx/6,Lx/6],[-Lx/6,Lx/6]) #plt.xticks([-Lx/6,Lx/6]) #ax.set_xticks([0],('hey')) plt.clabel(C,inline=1,fontsize=10,fmt="%1.1f") #print(zeta) #=========================== #USe animation function # anim = animation.FuncAnimation( # fig, # animate, # #frames=5, # interval=0.5, # blit=False #blit=False default, # ) # plt.show()
[ "dankroga@gmail.com" ]
dankroga@gmail.com
4cc745dedd64bfe8e4037da4f3c4b8f946bf51af
14416ce6e18f3a456ce8a197ccfe7a3772959e01
/used_car_price_prediction.py
cb28daad1632215077880b3dbf323efce0c1bfc8
[]
no_license
tanya-suri/car-price-prediction
0b08b00993c3b33734d63f22dc5d1f9bfd26d595
16e222c33c8851a99e28a3e36ba3e8d24ced5a7b
refs/heads/master
2023-06-08T23:19:25.154160
2021-02-04T20:30:46
2021-02-04T20:30:46
336,060,981
0
0
null
null
null
null
UTF-8
Python
false
false
9,975
py
#!/usr/bin/env python # coding: utf-8 # # By - Samiksha Bhavsar # # USED CAR PREDICTION PRICE # <img src="https://www.marketingdonut.co.uk/sites/default/files/styles/landing_pages_lists/public/usedcardealer1.jpg?itok=lSSEdwpY"> # # Problem definition # # This is the first step of machine learning life cycle.Here we analyse what kind of problem is, how to solve it. # So for this project we are using a car dataset, where we want to predict the selling price of car based on its certain features. # Since we need to find the real value, with real calculation, therefore this problem is regression problem. # We will be using regression machine learning algorithms to solve this problem. # In[2]: #loading required libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns get_ipython().run_line_magic('matplotlib', 'inline') # # Data Gathering # In[3]: #I have used the raw link for the csv file from my github repositiory. #url = 'https://raw.githubusercontent.com/SamikshaBhavsar/ML-end_to_end_project/main/car_price_project/car_dataset.csv' url='car_dataset.csv' #read this file with the help of pandas. dataset = pd.read_csv(url) #if you have already downloaded the csv file into our project folder then use : # dataset = pd.read_csv("car_dataset.csv") #print first five rows of the dataset dataset.head() # # Data Preparation # In[4]: #checking no. of rows and columns in dataset dataset.shape # This dataset contains 301 rows and 9 columns # In[5]: #Checking the data type of columns. #this step is important because sometimes dataset may contain wrong datatype of the feature. dataset.info() # Good! every data type is correctly mentioned. We need not to make any changes. # In[6]: #check statistical summary of all the columns with numerical values. 
dataset.describe() # In[7]: #check if there is any missing value in the dataset dataset.isnull().sum() # There are no missing values in the dataset # # Feature Engineering # In[8]: #adding a column with the current year dataset['Current_Year']=2020 dataset.head(5) # In[9]: #creating a new column which will be age of vehicles; new feature dataset['Vehicle_Age']=dataset['Current_Year'] - dataset['Year'] dataset.head(5) # In[10]: #getting dummies for these columns with help of pandas library dataset=pd.get_dummies(dataset,columns=['Fuel_Type','Transmission','Seller_Type'],drop_first=True) #dropping the columns which are redundant and irrelevant dataset.drop(columns=['Year'],inplace=True) dataset.drop(columns=['Current_Year'],inplace=True) dataset.drop(columns=['Car_Name'],inplace=True) #check out the dataset with new changes dataset.head() # <ul>Fuel_Type feature: # <li>Fuel is Petrol if Fuel_type_diesel = 0 ,Fuel_Type_Petrol = 1</li> # <li>Fuel is Diesel if Fuel_type_diesel = 1 ,Fuel_Type_Petrol = 0</li> # <li>Fuel is cng if Fuel_type_diesel = 0 ,Fuel_Type_Petrol = 0</li> # </ul> # <ul>Transmission feature: # <li>transmission is manual if Transmission_Manual = 1</li> # <li>transmission is automatic if Transmission_Manual = 0</li></ul> # <ul>Seller_Type feature: # <li>Seller_Type is Individual if Seller_Type_Individual = 1 </li> # <li>Seller_Type is dealer if Seller_Type_Individual = 0</li> </ul> # # # ### Pairplot # In[11]: #to see pairwise relationships on our dataset we will check pairplot from seaborn library sns.pairplot(dataset) # ### Heat map # In[12]: #create correlation matrix correlations = dataset.corr() indx=correlations.index #plot this correlation for clear visualisation plt.figure(figsize=(26,22)) #annot = True , dsiplays text over the cells. 
#cmap = "YlGnBu" is nothing but adjustment of colors for our heatmap sns.heatmap(dataset[indx].corr(),annot=True,cmap="YlGnBu") #amount of darkness shows how our features are correalated with each other # #### I have skipped the EDA part as the main idea is to create the ml model. # #### Try to do some visualizations, in order to understand the features of this dataset. # ### Features and target variable # In[13]: # taking all the features except "selling price" X=dataset.iloc[:,1:] # taking "selling price" as y , as it is our target variable y=dataset.iloc[:,0] # ### Feature Importance # In[14]: #checking and comparing the importance of features from sklearn.ensemble import ExtraTreesRegressor #creating object model = ExtraTreesRegressor() #fit the model model.fit(X,y) print(model.feature_importances_) # In[15]: #plot graph of feature importances for better visualization feat_importances = pd.Series(model.feature_importances_, index=X.columns) # considering top 5 important features feat_importances.nlargest(5).plot(kind='barh') plt.show() # ### Splitting data into training and testing # In[16]: #splitting the data from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) # # Fitting and evaluating different models # Here I am using three models : # 1. Linear Regression # 2. Decision Tree # 3. Random forest Regressor # # I will fit these models and then choose one with the better accuracy. # You can use any regression model as per your choice. 
# ## Linear Regression Model # In[17]: from sklearn.linear_model import LinearRegression #creating object for linear regression reg=LinearRegression() #fitting the linear regression model reg.fit(X_train,y_train) # Predict on the test data: y_pred y_pred = reg.predict(X_test) #metrics from sklearn import metrics #print mean absolute error print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) #print mean squared error print('MSE:', metrics.mean_squared_error(y_test, y_pred)) #print the root mean squared error print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) #print R2 metrics score R2 = metrics.r2_score(y_test,y_pred) print('R2:',R2) # ## Decision tree Model # In[18]: from sklearn.tree import DecisionTreeRegressor #creating object for Decision tree tree = DecisionTreeRegressor() #fitting the decision tree model tree.fit(X_train,y_train) # Predict on the test data: y_pred y_pred = tree.predict(X_test) #print errors from sklearn import metrics print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) R2 = metrics.r2_score(y_test,y_pred) print('R2:',R2) # ## Random Forest Model # In[19]: from sklearn.ensemble import RandomForestRegressor #creating object for Random forest regressor rf = RandomForestRegressor(n_estimators = 100, random_state = 42) #fitting the rf model rf.fit(X_train,y_train) # Predict on the test data: y_pred y_pred = rf.predict(X_test) #print errors from sklearn import metrics print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) R2 = metrics.r2_score(y_test,y_pred) print('R2:',R2) # #### We want our R2 score to be maximum and other errors to be minimum for better results # ### Random forest regressor is giving better results. 
therefore we will hypertune this model and then fit, predict. # # Hyperparamter tuning # In[20]: #n_estimators = The number of trees in the forest. n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)] print(n_estimators) # In[21]: from sklearn.model_selection import RandomizedSearchCV #Randomized Search CV # Number of trees in random forest n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)] # Number of features to consider at every split max_features = ['auto', 'sqrt'] # Maximum number of levels in tree max_depth = [int(x) for x in np.linspace(5, 30, num = 6)] # max_depth.append(None) # Minimum number of samples required to split a node min_samples_split = [2, 5, 10, 15, 100] # Minimum number of samples required at each leaf node min_samples_leaf = [1, 2, 5, 10] # In[22]: # Create the random grid random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf} print(random_grid) # In[23]: # Use the random grid to search for best hyperparameters # First create the base model to tune rf = RandomForestRegressor() # In[24]: # Random search of parameters, using 3 fold cross validation, # search across 100 different combinations rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 100, cv = 5, verbose=2, random_state=42, n_jobs = 1) # In[25]: #fit the random forest model rf_random.fit(X_train,y_train) # In[26]: #displaying the best parameters rf_random.best_params_ # In[27]: rf_random.best_score_ # # Final Predictions # In[28]: #predicting against test data y_pred=rf_random.predict(X_test) #print the erros print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) R2 = metrics.r2_score(y_test,y_pred) 
print('R2:',R2) # # Save the model # In[29]: import pickle # open a file, where you ant to store the data file = open('car_price_model_1.pkl', 'wb') # dump information to that file pickle.dump(rf_random, file) # In[ ]:
[ "tanya@invoid.co" ]
tanya@invoid.co
927b2fe90bc09f83f8e059e2b52cb49cb5721c15
d72e5bd9da5089b889ff4a02b3e7d0c3135e3aa4
/googleAudioProject/userManagement/UserManager.py
ec008b740ad2dec7474609cfe1e95d9178fa51a3
[]
no_license
BU-NU-CLOUD-F19/Google_Photos_for_Audio
b07c28827fdf50f913d11ee498ad41db57ad7be3
28e3c42ff10c1740af99d8924326958345406427
refs/heads/master
2022-12-17T00:26:38.681841
2019-12-08T03:12:42
2019-12-08T03:12:42
207,849,309
3
3
null
2022-12-08T06:42:28
2019-09-11T15:45:29
JavaScript
UTF-8
Python
false
false
2,928
py
from __future__ import print_function # Python 2/3 compatibility import decimal import boto3 import json from boto3.dynamodb.conditions import Key, Attr from botocore.exceptions import ClientError import hashlib, binascii, os import re regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$' def hash_password(password): """Hash a password for storing.""" salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii') pwdhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), salt, 100000) pwdhash = binascii.hexlify(pwdhash) return (salt + pwdhash).decode('ascii') def verify_password(stored_password, provided_password): """Verify a stored password against one provided by user""" salt = stored_password[:64] stored_password = stored_password[64:] pwdhash = hashlib.pbkdf2_hmac('sha512', provided_password.encode('utf-8'), salt.encode('ascii'), 100000) pwdhash = binascii.hexlify(pwdhash).decode('ascii') return pwdhash == stored_password # Helper class to convert a DynamoDB item to JSON. 
# class DecimalEncoder(json.JSONEncoder): # def default(self, o): # if isinstance(o, decimal.Decimal): # if abs(o) % 1 > 0: # return float(o) # else: # return int(o) # return super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb', region_name='us-east-2') class UserManager(object): def __init__(self, email, password): self.email = email.lower() self.password = password def new_user(self): table = dynamodb.Table('users') response = table.query( KeyConditionExpression=Key('email').eq(self.email) ) return response.get('Count') == 0 def add_user(self): table = dynamodb.Table('users') response = table.put_item( Item={ 'email': self.email, 'password': self.password, } ) # print("AddUser succeeded:") # print(json.dumps(response, indent=4, cls=DecimalEncoder)) def success_login(self): table = dynamodb.Table('users') try: response = table.get_item( Key={ 'email': self.email } ) except ClientError as e: print(e.response['Error']['Message']) else: user = response['Item'] print("Get user succeeded:") print(user) provided_password = user.get('password') return verify_password(provided_password, self.password) # print(json.dumps(user, indent=4, cls=DecimalEncoder)) # Check email validation def check(self): return re.search(regex,self.email)
[ "martina.song@outlook.com" ]
martina.song@outlook.com
f4779c12abf558854539a29d85de3a7952d33d97
f59ea6a59360c3ee35e3bcc977279c505524df9c
/02-arrotondamento-fibonacci-pert.py
f85e6b8943713ce24271eaff0c085402fa6b989d
[]
no_license
WohthaN/MNS
e04de7409a2fe1155cb6907daea24bc836a2bdab
e3bb9011e527d7bd22cf42b71574d60383defcb9
refs/heads/master
2021-09-09T08:13:42.339718
2016-06-02T10:19:34
2016-06-02T10:19:34
125,193,078
0
0
null
null
null
null
UTF-8
Python
false
false
2,193
py
from env import * from bigfloat import sqrt, precision z1 = (1+np.sqrt(np.float64(5)))/2 + 2e-15 z2 = (1-np.sqrt(np.float64(5)))/2 c0 = np.float64(1.) c1 = (1-np.sqrt(np.float64(5)))/2 C = np.array([[1., 1.], [z1, z2]]) c = np.array([c0, c1]) Cinv = np.array([[-z2/(z1-z2), 1/(z1-z2)], [-z1/(z2-z1), 1/(z2-z1)]]) Cinv_x_C = np.dot(Cinv, C) a1_64, a2_64 = np.dot(Cinv, c) def y64(n): return a1_64 * ((1+sqrt(5))/2) ** n - a2_64 * ((1-sqrt(5))/2) ** n print("64 bit\n", end=' ') print("z1=%s, z2=%s" % (float_format(z1),float_format(z2))) print("c0=%s, c1=%s" % (float_format(c0),float_format(c1))) print("C:\n", C) print("Cinv:\n", Cinv) print("Cinv_x_C:\n", Cinv_x_C) print(("Coefficienti 64 bit:\na1: %s\na2: %s\n%s" % (float_format(a1_64), float_format(a2_64), type(a2_64)))) print("\n") z1 = (1+np.sqrt(np.float32(5)))/2 z2 = (1-np.sqrt(np.float32(5)))/2 c0 = np.float32(1.) c1 = (1-np.sqrt(np.float32(5)))/2 C = np.array([[1., 1.], [z1, z2]]).astype('float32') c = np.array([c0, c1]) Cinv = np.array([[-z2/(z1-z2), 1/(z1-z2)], [-z1/(z2-z1), 1/(z2-z1)]]).astype('float32') Cinv_x_C = np.dot(Cinv, C).astype('float32') a1_32, a2_32 = np.dot(Cinv, c) print("32 bit\n", end=' ') print("z1=%s, z2=%s" % (float_format(z1),float_format(z2))) print("c0=%s, c1=%s" % (float_format(c0),float_format(c1))) print("C:\n", C) print("Cinv:\n", Cinv) print("Cinv_x_C:\n", Cinv_x_C) print(("Coefficienti 32 bit:\na1: %s\na2: %s\n%s" % (float_format(a1_32), float_format(a2_32), type(a2_32)))) print("\n") # Soluzione equazione def y32(n): return a1_32 * np.float32((1+sqrt(5))/2) ** n - a2_32 * np.float32((1-sqrt(5))/2) ** n n_max = 50 plot_32 = [abs(y32(x)) for x in range(n_max)] plot_64 = [abs(y64(x)) for x in range(n_max)] plt.figure(figsize=FIG_SIZE_2D, dpi=FIG_DPI_2D) plt.plot(list(range(n_max)), plot_32, 'ro', color='blue', label='float32') plt.plot(list(range(n_max)), plot_64, 'rx', color='red', label='float64') plt.yscale('log') plt.grid(**GRID_OPTIONS) plt.legend(loc='upper center') 
plt.xlabel('n') plt.ylabel('$\log_{10} |y_n|$') plt.savefig('./figs/02-fibonacci-pert.eps', dpi=SAVE_FIG_DPI) #plt.show()
[ "carlisamuele@csspace.net" ]
carlisamuele@csspace.net
2afc98c9ae3f4253082cbcb99c14ea995ba0522c
2c4ed4e0c582ca7366d5a94b2776c8d282e3de5c
/intranetcinepel/intranetapp/migrations/0018_auto_20200713_0947.py
31c7646a5a245cc1d27d818928c3588ac8f7c700
[]
no_license
LoicJu/IntranetCinepel
3c96897cde92f31de95e1741863c799177adf73c
d5db31f3b0e1874f12982a859699e1c7261e3e8a
refs/heads/master
2022-12-11T04:08:51.862568
2020-07-24T07:47:01
2020-07-24T07:47:01
292,315,640
1
0
null
null
null
null
UTF-8
Python
false
false
11,971
py
# Generated by Django 3.0.8 on 2020-07-13 09:47 from django.db import migrations import jsonfield.fields class Migration(migrations.Migration): dependencies = [ ('intranetapp', '0017_auto_20200713_0916'), ] operations = [ migrations.AlterField( model_name='template', name='template_content', field=jsonfield.fields.JSONField(default=[{'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Lundi 01', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Mardi 02', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Mercredi 03', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Jeudi 04', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': 
{'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Vendredi 05', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Samedi 06', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Dimanche 07', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Lundi 08', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Mardi 09', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': 
'', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Mercredi 10', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Jeudi 11', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Vendredi 12', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Samedi 13', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Dimanche 14', 'Evenement': '', 
'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Lundi 15', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Mardi 16', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Mercredi 17', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Jeudi 18', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 
'Bio': {'Bio Caisse': ''}, 'Date': 'Vendredi 19', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Samedi 20', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Dimanche 21', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Lundi 22', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Mardi 23', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': 
{'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Mercredi 24', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Jeudi 25', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Vendredi 26', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Samedi 27', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}, {'Appolo': {'Appolo Bar': '', 'Appolo Caisse 1': '', 'Appolo Caisse 2': '', 'Appolo Caisse Techn': '', 'Appolo Placeur 1': '', 'Appolo Placeur 2': ''}, 'Arcade': {'Arcade Bar': '', 'Arcade Caisse 1': '', 'Arcade Placeur 1': ''}, 'Bio': {'Bio Caisse': ''}, 'Date': 'Dimanche 28', 'Evenement': '', 'Rex': {'Rex Bar': '', 'Rex Caisse': ''}, 'Studio': {'Studio Bar': '', 'Studio Caisse': ''}, 'Vacance Scolaire': ''}]), ), ]
[ "loic.jurasz@he-arc.ch" ]
loic.jurasz@he-arc.ch
c70fc9e56b15f82e28bb7112b30e33ba26b542dc
be11a2e3ba05fc6073a7a12eddc23b1f85608517
/brainlit/utils/__init__.py
7b55a77314a0e10791f2f62647b339a25dddce39
[ "Apache-2.0" ]
permissive
DevinCrowley/brainlit
d2ac02e897b5932e9c013ac32c2e47ffaea7b14d
b206efe4a407bd96158ca8e664790267c2776cba
refs/heads/master
2022-11-24T21:15:27.352637
2020-07-09T20:57:05
2020-07-09T20:57:05
278,587,299
0
0
null
2020-07-10T08:59:29
2020-07-10T08:59:28
null
UTF-8
Python
false
false
172
py
from brainlit.utils.ngl_pipeline import * from brainlit.utils.upload_skeleton import * from brainlit.utils.swc import * from brainlit.utils.upload_to_neuroglancer import *
[ "bvarjavand@gmail.com" ]
bvarjavand@gmail.com
932c5570972676c8cbfdbea5edf2b2aa4bb2b139
49cf9d25147d2d87fb84fd52b5404bfffec36626
/split.py
ed548305dcbc35fe7a3b46cb9f03a44ea5694790
[ "MIT" ]
permissive
broxeph/ameryn
5963a3bacf631440ea61de750154e881af9b0acf
e1289c280ca865ec84625b712adc52c536b4b174
refs/heads/master
2021-01-10T12:54:02.736411
2017-03-07T00:18:47
2017-03-07T00:18:47
47,580,201
0
0
null
null
null
null
UTF-8
Python
false
false
19,011
py
""" Splits wave files based on cues (markers) in header, optionally exporting to mp3/FLAC with ffmpeg. (c) Ameryn Media LLC, 2015. All rights reserved. """ import struct import warnings import wave import os from sys import byteorder, version_info, exit import multiprocessing import logging import csv import numpy from mutagen.mp3 import MP3 from mutagen.id3 import ID3, APIC, error from mutagen import File from mutagen.flac import Picture from pydub import AudioSegment from send2trash import send2trash import utils import config class WavFileWarning(UserWarning): pass _big_endian = False WAVE_FORMAT_PCM = 0x0001 WAVE_FORMAT_IEEE_FLOAT = 0x0003 WAVE_FORMAT_EXTENSIBLE = 0xfffe KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT) input_filename_list = [] errors = [] # assumes file pointer is immediately after the 'fmt ' id def _read_fmt_chunk(fid): if _big_endian: fmt = '>' else: fmt = '<' res = struct.unpack(fmt + 'iHHIIHH', fid.read(20)) size, comp, noc, rate, sbytes, ba, bits = res if comp not in KNOWN_WAVE_FORMATS or size > 16: comp = WAVE_FORMAT_PCM warnings.warn("Unknown wave file format", WavFileWarning) if size > 16: fid.read(size - 16) return size, comp, noc, rate, sbytes, ba, bits # assumes file pointer is immediately after the 'data' id def _read_data_chunk(fid, comp, noc, bits, mmap=False): if _big_endian: fmt = '>i' else: fmt = '<i' size = struct.unpack(fmt, fid.read(4))[0] bytes_num = bits // 8 if bits == 8: dtype = 'u1' else: if _big_endian: dtype = '>' else: dtype = '<' if comp == 1: dtype += 'i%d' % bytes_num else: dtype += 'f%d' % bytes_num if not mmap: data = numpy.fromstring(fid.read(size), dtype=dtype) else: start = fid.tell() data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start, shape=(size // bytes_num,)) fid.seek(start + size) if noc > 1: data = data.reshape(-1, noc) return data def _skip_unknown_chunk(fid): if _big_endian: fmt = '>i' else: fmt = '<i' data = fid.read(4) # Zero-padding to avoid struct read errors data = '\0' * 
(4 - len(data)) + data size = struct.unpack(fmt, data)[0] # Print data fid.seek(size, 1) def _read_riff_chunk(fid): global _big_endian str1 = fid.read(4) if str1 == b'RIFX': _big_endian = True elif str1 != b'RIFF': raise ValueError("Not a WAV file.") if _big_endian: fmt = '>I' else: fmt = '<I' fsize = struct.unpack(fmt, fid.read(4))[0] + 8 str2 = fid.read(4) if (str2 != b'WAVE'): raise ValueError("Not a WAV file.") if str1 == b'RIFX': _big_endian = True return fsize # open a wave-file def read(filename, mmap=False): """ Return the sample rate (in samples/sec) and data from a WAV file Parameters ---------- filename : string or open file handle Input wav file. mmap : bool, optional Whether to read data as memory mapped. Only to be used on real files (Default: False) .. versionadded:: 0.12.0 Returns ------- rate : int Sample rate of wav file data : numpy array Data read from wav file Notes ----- * The file can be an open file or a filename. * The returned sample rate is a Python integer * The data is returned as a numpy array with a data-type determined from the file. 
""" if hasattr(filename, 'read'): fid = filename mmap = False else: fid = open(filename, 'rb') try: fsize = _read_riff_chunk(fid) noc = 1 bits = 8 comp = WAVE_FORMAT_PCM while fid.tell() < fsize: # read the next chunk chunk_id = fid.read(4) if chunk_id == b'fmt ': size, comp, noc, rate, sbytes, ba, bits = _read_fmt_chunk(fid) elif chunk_id == b'fact': _skip_unknown_chunk(fid) elif chunk_id == b'data': data = _read_data_chunk(fid, comp, noc, bits, mmap=mmap) elif chunk_id == b'LIST': # Someday this could be handled properly but for now skip it _skip_unknown_chunk(fid) warnings.warn("List chunk (non-data) not understood, skipping it.", WavFileWarning) elif chunk_id == b'cue ': # Someday this could be handled properly but for now skip it _skip_unknown_chunk(fid) warnings.warn("Cue chunk (non-data) not understood, skipping it.", WavFileWarning) else: warnings.warn("Chunk (non-data) not understood, skipping it.", WavFileWarning) _skip_unknown_chunk(fid) finally: if not hasattr(filename, 'read'): fid.close() else: fid.seek(0) return rate, data # Write a wave-file # sample rate, data def write(filename, rate, data): """ Write a numpy array as a WAV file Parameters ---------- filename : string or open file handle Output wav file rate : int The sample rate (in samples/sec). data : ndarray A 1-D or 2-D numpy array of either integer or float data-type. Notes ----- * The file can be an open file or a filename. * Writes a simple uncompressed WAV file. * The bits-per-sample will be determined by the data-type. * To write multiple-channels, use a 2-D array of shape (Nsamples, Nchannels). 
""" if hasattr(filename, 'write'): fid = filename else: fid = open(filename, 'wb') try: dkind = data.dtype.kind if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and data.dtype.itemsize == 1)): raise ValueError("Unsupported data type '%s'" % data.dtype) fid.write(b'RIFF') fid.write(b'\x00\x00\x00\x00') fid.write(b'WAVE') # fmt chunk fid.write(b'fmt ') if dkind == 'f': comp = 3 else: comp = 1 if data.ndim == 1: noc = 1 else: noc = data.shape[1] bits = data.dtype.itemsize * 8 sbytes = rate * (bits // 8) * noc ba = noc * (bits // 8) fid.write(struct.pack('<ihHIIHH', 16, comp, noc, rate, sbytes, ba, bits)) # data chunk fid.write(b'data') fid.write(struct.pack('<i', data.nbytes)) if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and byteorder == 'big'): data = data.byteswap() _array_tofile(fid, data) # Determine file size and place it in correct position at start of the file. size = fid.tell() fid.seek(4) fid.write(struct.pack('<i', size - 8)) finally: if not hasattr(filename, 'write'): fid.close() else: fid.seek(0) if version_info[0] >= 3: def _array_tofile(fid, data): # ravel gives a c-contiguous buffer fid.write(data.ravel().view('b').data) else: def _array_tofile(fid, data): fid.write(data.tostring()) def read_markers(f, mmap=False): if hasattr(f, 'read'): fid = f else: fid = open(f, 'rb') fsize = _read_riff_chunk(fid) cues = [] while (fid.tell() < fsize): chunk_id = fid.read(4) # print 'chunk_id:', chunk_id if chunk_id == b'cue ' or 'cue' in chunk_id: if chunk_id[-3:] == 'cue': # Because Audition can't create cue chunks for crap fid.read(1) size, numcue = struct.unpack('<ii', fid.read(8)) for c in range(numcue): id, position, datachunkid, chunkstart, blockstart, sampleoffset = struct.unpack('<iiiiii', fid.read(24)) if sampleoffset not in cues: cues.append(sampleoffset) elif chunk_id == b'_PMX': if chunk_id == 'totally implemented yo': print '_PMX!!' 
first = struct.unpack('<i', fid.read(4))[0] print 'first:', first text = fid.read(first) print 'text:', text else: _skip_unknown_chunk(fid) fid.close() return cues def split_item(item, digital_folder=config.digital_folder, dropbox_move=True): logging.info('BLOOOAAARRRRGHHGGGGHHHHH!!! (Please hold...)') audio = wave.open(item.path) audio.params = audio.getparams() # Load AudioSegment for encoded segments if item.digital_ext is not 'wav': audio_segment = AudioSegment.from_wav(item.path) # Loop through cues and write regions (assumes start and end markers) for i, track in enumerate(item.tracks): # Build track filename digital_track_name = '{0} - {1} - {2}'.format(item.digital_file_name, str(i + 1), track.title) digital_track_name = digital_track_name.replace('/', '-') digital_track_name = ''.join( [c for c in digital_track_name if c.isalpha() or c.isdigit() or c in "'- ._()!@#$%^&*"]).rstrip('. ') if dropbox_move: digital_track_path = os.path.join(item.dropbox_order_folder, digital_track_name) + '.' + item.digital_ext else: digital_track_path = os.path.join(digital_folder, digital_track_name) + '.' 
+ item.digital_ext logging.info('Region {0} | {1} -> {2}'.format(i + 1, track.duration, digital_track_path)) # Split, export track if 'wav' not in item.digital_ext: digital_track = audio_segment[(item.cues[i] / 44.1):(item.cues[i + 1] / 44.1)] tags = {'title': track.title or 'Track {0}'.format(i + 1), 'artist': item.artist, 'albumartist': item.artist, 'album': (item.album or item.artist), 'track': (i + 1)} digital_track.export(out_f=digital_track_path, format=item.digital_ext, bitrate=item.bitrate, tags=tags, id3v2_version='3') # Add cover art if item.thumb and (item.digital_ext == 'mp3'): mutagen_audio = MP3(digital_track_path, ID3=ID3) try: # Add ID3 tag if it doesn't exist mutagen_audio.add_tags() except error: pass mutagen_audio.tags.add( APIC( encoding=3, # 3 is for utf-8 mime='image/jpeg', # image/jpeg or image/png type=3, # 3 is for the cover image desc=u'Cover', data=open(item.thumb_path, 'rb').read() ) ) mutagen_audio.save(v2_version=3) elif item.thumb and (item.digital_ext == 'flac'): mutagen_audio = File(digital_track_path) flac_image = Picture() flac_image.type = 3 mime = 'image/jpeg' flac_image.desc = 'Cover' with open(item.thumb_path, 'rb') as f: flac_image.data = f.read() mutagen_audio.add_picture(flac_image) mutagen_audio.save() else: logging.warning('No cover found for item {0}'.format(item.name)) else: digital_track = wave.open(digital_track_path, 'w') digital_track.setparams(audio.params) region_length = item.cues[i + 1] - item.cues[i] digital_track.writeframes(audio.readframes(region_length)) digital_track.close() audio.close() def split_file(input_filename, export_format='flac', bitrate='192k', tracks=None, artist='', album='', cover=None): input_filename_fullpath = os.path.join(config.server_clean_folder, input_filename) input_basename = os.path.basename(input_filename).split('.wav')[0] # Pull markers from wave file try: file_markers = read_markers(input_filename_fullpath) except IOError: errors.append(input_filename_fullpath) return ifile = 
wave.open(input_filename_fullpath) ifile_params = ifile.getparams() # Add start and end markers to list print input_filename if not file_markers: print print 'Waaait. Ain\'t no markers here... ({0})'.format(input_filename) print if not file_markers or file_markers[0] > 1000: print 'Start marker added.' file_markers.insert(0, 0) if not file_markers or file_markers[-1] < ifile.getnframes() - 1000: print 'End marker added.' file_markers.append(ifile.getnframes()) # Remove marker near beginning/end of track (usually silent) if file_markers[-1] - file_markers[-2] < 400000: # ~9 seconds print 'Doubled end-marker deleted.' del file_markers[-2] if file_markers[1] - file_markers[0] < 200000: # ~4.5 seconds print 'Doubled beginning-marker deleted.' del file_markers[1] for i in range(len(file_markers) - 1): if file_markers[i + 1] - file_markers[i] < 88200: # 2 seconds print 'Doubled mid-file marker deleted.' del file_markers[i] print 'Markers:', file_markers # Load AudioSegment for encoded segments if export_format is not 'wav': ifile_mp3 = AudioSegment.from_wav(input_filename_fullpath) # Loop through cues and write regions (assumes start and end markers) for marker_num, marker in enumerate(file_markers): if marker_num == len(file_markers) - 1: break region_basename = input_basename + '_' + str(marker_num + 1).zfill(2) + '.wav' region_name = os.path.join(config.split_folder, region_basename) region_length = file_markers[marker_num + 1] - marker m, s = divmod(region_length / 44100, 60) #print 'Region {0} | {1}:{2} -> {3} ({4} - {5})'.format(marker_num, m, str(s).zfill(2), region_name, (marker / 44.1), # (file_markers[marker_num + 1] / 44.1)) #debug if export_format is not 'wav': mp3_track = ifile_mp3[(marker / 44.1):(file_markers[marker_num + 1] / 44.1)] if tracks: mp3_title = tracks[marker_num] else: mp3_title = 'Track ' + str(marker_num + 1) mp3_tags = {'title': mp3_title, 'artist': artist, 'albumartist': artist, 'album': album, 'track': (marker_num + 1)} mp3_fname = 
os.path.join(config.split_folder, (os.path.splitext(region_basename)[0] + '.' + export_format)) # print 'Export:', mp3_fname, export_format, bitrate, mp3_tags mp3_track.export(out_f=mp3_fname, format=export_format, bitrate=bitrate, tags=mp3_tags) # Add cover art if cover: audio = MP3(mp3_fname, ID3=ID3) # Add ID3 tag if it doesn't exist try: audio.add_tags() except error: pass audio.tags.add( APIC( encoding=3, # 3 is for utf-8 mime='image/jpeg', # image/jpeg or image/png type=3, # 3 is for the cover image desc=u'Cover', data=open(cover, 'rb').read() ) ) audio.save() else: pass #print 'No cover found for item ', mp3_fname else: region = wave.open(region_name, 'w') region.setparams(ifile_params) region.writeframes(ifile.readframes(region_length)) region.close() ifile.close() if __name__ == '__main__': # Make input filename list logging.basicConfig(level=config.log_level) if config.input_resplit_list: utils.export_csv(['re-split_list', 'resplit-track-lists']) with open(config.resplit_list_path, 'r') as resplit_list: rows = csv.reader(resplit_list) for row in rows: ''' numeric_length = 50 for i, c in enumerate(row[0]): if not c.isdigit(): numeric_length = i break #print row[0][:numeric_length].zfill(5) + row[0][numeric_length:] + '_clean.wav' input_filename_list.append(row[0][:numeric_length].zfill(5) + row[0][numeric_length:] + '_clean.wav') ''' print row[0] input_filename_list.append(row[0] + '_clean.wav') # Remove bad split files from Julius deleted_resplits = [] print 'Deleting resplits...' 
for each_file in os.listdir(config.server_split_folder): if any(each for each in input_filename_list if each_file.split('_clean')[0].startswith(each.split('_clean')[0])): deleted_resplits.append(each_file) send2trash(os.path.join(config.server_split_folder, each_file)) print 'Deleted re-splits from Julius ({0}): {1}'.format(len(deleted_resplits), deleted_resplits) print 'Input filenames (re-record list):', len(input_filename_list) elif config.serial_series or True: #"Or True" lol for g in os.listdir(config.server_clean_folder): if g.startswith('k299') and g.endswith('.wav'): #if input_filename_list_range[0] <= g < input_filename_list_range[1] and g.lower().endswith('.wav'): input_filename_list.append(g) ''' # If end of range reached (redundant?) if g.startswith(input_filename_list_range[1]): break ''' print 'Input filenames (series):', len(input_filename_list) elif config.input_whole_folder: for f in os.listdir(config.server_clean_folder): if f.endswith('.wav'): input_filename_list.append(f) print 'Input filenames (folder):', len(input_filename_list) else: input_filename_list = [input_filename] if not config.input_resplit_list: input_filename_list.sort() print input_filename_list # RUN THE TRAP pool_array = [os.path.join(config.server_clean_folder, each) for each in input_filename_list] ''' if config.pool_processing: pool = multiprocessing.Pool(min(6, multiprocessing.cpu_count() - 1)) pool.map(split_file, pool_array) else: for each in pool_array: split_file(each) ''' for each in pool_array: split_file(each) if errors: print ' ---------------' print 'Files not found:' for error in errors: print error print 'Discogs time!' import discogs discogs.main()
[ "alex@ameryn.com" ]
alex@ameryn.com
23e77f8d02e5d307347f08baca5d033626e01412
51b7b81cce1e8943926c531ad8763af8fd4074dc
/1260.py
8280b9c478f211dddcdc27f39f47b057c9ca1dae
[]
no_license
goodsosbva/BOJ_Graph
f65598591b07ea2f637cba2644bdc81386afb36e
34fe8bfec0543d9884869fe5ebbb536c6fcc3fbf
refs/heads/main
2023-03-22T08:14:53.735351
2021-03-07T09:22:39
2021-03-07T09:22:39
338,587,428
1
0
null
null
null
null
UTF-8
Python
false
false
790
py
N, M, V = map(int, input().split()) matrix = [[0] * (N + 1) for i in range(N + 1)] for i in range(M): a, b = map(int, input().split()) matrix[a][b] = matrix[b][a] = 1 visit_list = [0] * (N + 1) def dfs(V): visit_list[V] = 1 # 방문한 점 1로 표시 print(V, end=' ') for i in range(1, N + 1): if (visit_list[i] == 0 and matrix[V][i] == 1): dfs(i) def bfs(V): queue = [V] # 들려야 할 정점 저장 visit_list[V] = 0 # 방문한 점 0으로 표시 while queue: V = queue.pop(0) print(V, end=' ') for i in range(1, N + 1): if (visit_list[i] == 1 and matrix[V][i] == 1): queue.append(i) visit_list[i] = 0 dfs(V) print() bfs(V)
[ "noreply@github.com" ]
noreply@github.com
59d68b168bc022dcb37e44d8200e565c72a8f89a
7dd1d47c99d1b4485f96c53b79049a342795fb6b
/app/core/migrations/0004_recipe.py
414d3a799d601268beb68890d5d5d977ed26647c
[]
no_license
klaudiakryskiewicz/recipe-app-api
1966f09c5c7d8e9364797b70f034d30f0170c167
19db17d6710f39ab98e0a19b8032aa54f54eff12
refs/heads/main
2023-06-01T04:35:13.359488
2021-06-08T14:14:09
2021-06-08T14:14:09
369,801,500
0
0
null
null
null
null
UTF-8
Python
false
false
1,043
py
# Generated by Django 2.1.15 on 2021-05-26 16:57 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('core', '0003_ingredient'), ] operations = [ migrations.CreateModel( name='Recipe', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=255)), ('time_minutes', models.IntegerField()), ('price', models.DecimalField(decimal_places=2, max_digits=5)), ('link', models.CharField(blank=True, max_length=255)), ('ingredients', models.ManyToManyField(to='core.Ingredient')), ('tags', models.ManyToManyField(to='core.Tag')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
[ "klaudia.sandra.kryskiewicz@gmail.com" ]
klaudia.sandra.kryskiewicz@gmail.com
a42609d3d57b7e0f3298e6dee88c7531e8b4df7b
32c915adc51bdb5d2deab2a592d9f3ca7b7dc375
/Chapter_11_programming_tasks/task_2.py
57291c7a43269738ae347bef625ced59459b1aa2
[]
no_license
nervig/Starting_Out_With_Python
603c2b8c9686edcf92c1a90596d552b873fe6229
d617ee479c7c77038331b5f262e00f59e8e90070
refs/heads/master
2023-02-25T07:14:12.685417
2021-02-02T18:45:00
2021-02-02T18:45:00
335,391,362
2
1
null
null
null
null
UTF-8
Python
false
false
725
py
import employee


def main():
    """Prompt for shift-supervisor details, build the object, and echo the stored data."""
    name = input('Enter a name of employee: ')
    emp_number = input('Enter a number of employee: ')
    salary = input('Enter an annual salary: ')
    bonus = input('Enter an annual bonus: ')

    supervisor = employee.ShiftSupervisor(name, emp_number, salary, bonus)

    # Read everything back through the accessors to confirm what was stored.
    print('The data of shift supervisor: ')
    print('Name: ' + supervisor.get_name_of_employee())
    print('ID: ' + supervisor.get_number_of_employee())
    print('Annual salary: ' + supervisor.get_annual_salary())
    print('Annual bonus: ' + supervisor.get_annual_bonus())


main()
[ "solide@yandex.ru" ]
solide@yandex.ru
463e8250d430cbb2b5411ccf886fc6006e203cb9
9ca223ad566c40efc0917f2a33bf13446102c5ae
/Prueba.py
4f2ef40ec8589fd8714f917276f42a7bb1ca3148
[ "Unlicense" ]
permissive
EstebanRiso/Python-IA
b1e65d7ab6e829e1c87e2308b348822830ae9de3
352186ae74fcdc3b1252918999413d0e11b7ae26
refs/heads/main
2023-01-15T15:31:47.371728
2020-11-18T20:54:34
2020-11-18T20:54:34
312,214,800
0
0
null
null
null
null
UTF-8
Python
false
false
95
py
import Nodos


def principal():
    """Assign a name on the Nodos module and print it back."""
    etiqueta = 'silencio'
    Nodos.nombre = etiqueta
    print(Nodos.nombre)


principal()
[ "SteveRiso.2000@gmail.com" ]
SteveRiso.2000@gmail.com
ed1a80133b79485d1c7d0125da7309754e321eea
d922b02070c11c19ba6104daa3a1544e27a06e40
/DSA_Project/venv/Scripts/easy_install-3.8-script.py
d71594c04a3b333adb75b4777054c951680c802e
[]
no_license
viharivnv/DSA
2ca393a8e304ee7b4d540ff435e832d94ee4b2a7
777c7281999ad99a0359c44291dddaa868a2525c
refs/heads/master
2022-10-15T15:26:59.045698
2020-06-17T15:55:33
2020-06-17T15:55:33
273,020,116
0
0
null
null
null
null
UTF-8
Python
false
false
462
py
#!C:\Users\vihar\PycharmProjects\DSA_Project\venv\Scripts\python.exe # EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8' __requires__ = 'setuptools==40.8.0' import re import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit( load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')() )
[ "52350934+viharivnv@users.noreply.github.com" ]
52350934+viharivnv@users.noreply.github.com
463b39f9e721b88b41a491bba21489eef81fe2d8
ba63a484cf8859c0f46da90390cc23d7c4a85673
/ur5_run_moveit_cpp/launch/ur5_run_moveit_cpp.launch.py
93c8532c0550b357588cf720326dff9550148fee
[]
no_license
migueelnery/ur5_moveit2_demo
da9ade878742bd1492f652cebc526f76f520ea3f
230a8773b3700f2ae9f218852867a6287e60d174
refs/heads/main
2023-03-06T19:06:18.684993
2021-02-19T06:06:49
2021-02-19T06:06:49
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,318
py
import os
import yaml
from launch import LaunchDescription
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory


def load_file(package_name, file_path):
    """Return the contents of file_path (relative to package_name's share dir) as a string."""
    package_path = get_package_share_directory(package_name)
    absolute_file_path = os.path.join(package_path, file_path)

    with open(absolute_file_path, 'r') as file:
        return file.read()


def load_yaml(package_name, file_path):
    """Parse the YAML file at file_path (relative to package_name's share dir)."""
    package_path = get_package_share_directory(package_name)
    absolute_file_path = os.path.join(package_path, file_path)

    with open(absolute_file_path, 'r') as file:
        return yaml.safe_load(file)


def generate_launch_description():
    """Assemble the MoveItCpp demo launch: robot description parameters, the
    MoveItCpp node, RViz, TF publishers, and a fake joint driver."""
    # moveit_cpp.yaml is passed by filename for now since it's node specific
    moveit_cpp_yaml_file_name = get_package_share_directory('ur5_run_moveit_cpp') + "/config/moveit_cpp.yaml"

    # Component yaml files are grouped in separate namespaces
    # URDF robot model, passed verbatim as the robot_description parameter.
    robot_description_config = load_file('ur_e_description', 'urdf/ur5e_robot.urdf')
    robot_description = {'robot_description' : robot_description_config}

    # SRDF semantic description (planning groups, disabled collisions, ...).
    robot_description_semantic_config = load_file('ur5_e_moveit_config', 'config/ur5e.srdf')
    robot_description_semantic = {'robot_description_semantic' : robot_description_semantic_config}

    kinematics_yaml = load_yaml('ur5_e_moveit_config', 'config/kinematics.yaml')
    robot_description_kinematics = { 'robot_description_kinematics' : kinematics_yaml }

    # Controller list consumed by MoveIt's simple controller manager.
    controllers_yaml = load_yaml('ur5_run_moveit_cpp', 'config/controllers.yaml')
    moveit_controllers = { 'moveit_simple_controller_manager' : controllers_yaml, 'moveit_controller_manager': 'moveit_simple_controller_manager/MoveItSimpleControllerManager'}

    # OMPL planning pipeline: plugin, request-adapter chain, and start-state tolerance.
    ompl_planning_pipeline_config = { 'ompl' : { 'planning_plugin' : 'ompl_interface/OMPLPlanner', 'request_adapters' : """default_planner_request_adapters/AddTimeOptimalParameterization default_planner_request_adapters/FixWorkspaceBounds default_planner_request_adapters/FixStartStateBounds default_planner_request_adapters/FixStartStateCollision 
default_planner_request_adapters/FixStartStatePathConstraints""" , 'start_state_max_bounds_error' : 0.1 } }
    ompl_planning_yaml = load_yaml('ur5_e_moveit_config', 'config/ompl_planning.yaml')
    ompl_planning_pipeline_config['ompl'].update(ompl_planning_yaml)

    # MoveItCpp demo executable
    run_moveit_cpp_node = Node(name='ur5_run_moveit_cpp',
                               package='ur5_run_moveit_cpp',
                               # TODO(henningkayser): add debug argument
                               # prefix='gdb -ex=r --args',
                               executable='ur5_run_moveit_cpp',
                               output='screen',
                               arguments=['--ros-args --log-level debug'],
                               parameters=[moveit_cpp_yaml_file_name, robot_description, robot_description_semantic, kinematics_yaml, ompl_planning_pipeline_config, moveit_controllers])

    # RViz
    rviz_config_file = get_package_share_directory('ur5_run_moveit_cpp') + "/launch/ur5_run_moveit_cpp.rviz"
    rviz_node = Node(package='rviz2',
                     executable='rviz2',
                     name='rviz2',
                     output='log',
                     arguments=['-d', rviz_config_file],
                     parameters=[robot_description, robot_description_semantic])

    # Static TF
    # Identity transform pinning base_link to the world frame.
    static_tf = Node(package='tf2_ros',
                     executable='static_transform_publisher',
                     name='static_transform_publisher',
                     output='log',
                     arguments=['0.0', '0.0', '0.0', '0.0', '0.0', '0.0', 'world', 'base_link'])

    # Publish TF
    robot_state_publisher = Node(package='robot_state_publisher',
                                 executable='robot_state_publisher',
                                 name='robot_state_publisher',
                                 output='both',
                                 parameters=[robot_description])

    # Fake joint driver
    fake_joint_driver_node = Node(package='fake_joint_driver',
                                  executable='fake_joint_driver_node',
                                  # TODO(JafarAbdi): Why this launch the two nodes (controller manager and the fake joint driver) with the same name!
                                  # name='fake_joint_driver_node',
                                  parameters=[{'controller_name': 'ur5_e_controller'},
                                              os.path.join(get_package_share_directory("ur5_run_moveit_cpp"), "config", "ur5_e_controllers.yaml"),
                                              os.path.join(get_package_share_directory("ur5_run_moveit_cpp"), "config", "start_positions.yaml"),
                                              robot_description]
                                  )

    return LaunchDescription([ static_tf, robot_state_publisher, rviz_node, run_moveit_cpp_node, fake_joint_driver_node ])
[ "floris.erich@aist.go.jp" ]
floris.erich@aist.go.jp
c1a244e9a04e199b2cb55cf64972a4d7cd9cc23e
2350760b49f2e3120ad2d998f5cf854c600367c7
/utils/enhance.py
92baa841c2399580ad0eff21ac378b264fa05bc7
[ "Apache-2.0", "Apache-1.1" ]
permissive
aishu-123-4/Automatic-License-Plate-Recognition
b7bd2e0af63574161dc13d8b9bcc8cc7dbd03a1b
310797715d2705d302aeae55042b6415469785ed
refs/heads/master
2023-04-22T22:52:07.919753
2021-05-14T06:18:17
2021-05-14T06:18:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,970
py
import streamlit as st
import cv2
import numpy as np
from PIL import Image,ImageEnhance

# Module-level placeholders for the current crop/full image and display widths.
crop, image = None, None
img_size, crop_size = 600, 400

# noise removal
def remove_noise(image):
    """Apply a 5x5 median blur to suppress salt-and-pepper noise."""
    return cv2.medianBlur(image,5)

#thresholding
def thresholding(image, rate):
    """Binarize image; Otsu's method combined with the supplied threshold rate."""
    return cv2.threshold(image, rate, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

#dilation
def dilate(image):
    """Dilate with a 5x5 kernel (thickens foreground strokes)."""
    kernel = np.ones((5,5),np.uint8)
    return cv2.dilate(image, kernel, iterations = 1)

#erosion
def erode(image):
    """Erode with a 5x5 kernel (thins foreground strokes)."""
    kernel = np.ones((5,5),np.uint8)
    return cv2.erode(image, kernel, iterations = 1)

#opening - erosion followed by dilation
def opening(image):
    """Morphological opening: erosion then dilation (removes small specks)."""
    kernel = np.ones((5,5),np.uint8)
    return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)

#skew correction
def deskew(image):
    """Rotate image about its center so it is level.

    The angle comes from the minimum-area rectangle around all non-zero
    pixels — assumes a grayscale/binary input where content is non-zero
    (TODO confirm with callers).
    """
    coords = np.column_stack(np.where(image > 0))
    angle = cv2.minAreaRect(coords)[-1]
    # minAreaRect angles lie in (-90, 0]; pick the smaller correcting rotation.
    if angle < -45:
        angle = -(90 + angle)
    else:
        angle = -angle
    (h, w) = image.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
    return rotated

def cannize_image(image):
    """Return Canny edges of a PIL image after Gaussian smoothing."""
    new_img = np.array(image.convert('RGB'))
    img = cv2.cvtColor(new_img,1)
    img = cv2.GaussianBlur(img, (11, 11), 0)
    canny = cv2.Canny(img, 100, 150)
    return canny

@st.cache(suppress_st_warning=True, allow_output_mutation=True, show_spinner=False)
def cropped_image(image, b):
    """Crop image to bounding box b = (x1, y1, x2, y2) and return a PIL image.

    Also draws the red box on a copy of the source before slicing it out.
    """
    crop = cv2.rectangle(np.array(image), (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (0, 0, 255), 1)
    crop = crop[int(b[1]):int(b[3]), int(b[0]):int(b[2])]
    crop = Image.fromarray(crop)
    return crop

# @st.cache(suppress_st_warning=True, allow_output_mutation=True, show_spinner=True)
def enhance_crop(crop):
    """Streamlit UI: let the user pick an enhancement mode for the cropped
    plate, display the processed image, and return it."""
    st.write("## Enhanced License Plate")
    rgb = np.array(crop.convert('RGB'))
    gray = cv2.cvtColor(rgb,cv2.COLOR_BGR2GRAY)

    enhance_type = st.radio("Enhance Type",\
        ["Original","Custom", "Gray-Scale","Contrast",\
        "Brightness","Blurring","Cannize",\
        "Remove_noise", "Thresholding", "Dilate",\
        "Opening","Erode", "Deskew"])

    crop_display = st.empty()
    slider = st.empty()

    # Only these modes take a numeric parameter from the slider.
    if enhance_type in ["Contrast","Brightness","Blurring","Thresholding","Custom"]:
        rate = slider.slider(enhance_type,0.2,8.0,(1.5))

    if enhance_type == 'Original':
        output_image = crop
    elif enhance_type == 'Gray-Scale':
        output_image = cv2.cvtColor(rgb,cv2.COLOR_BGR2GRAY)
    elif enhance_type == 'Contrast':
        enhancer = ImageEnhance.Contrast(crop)
        output_image = enhancer.enhance(rate)
    elif enhance_type == 'Brightness':
        enhancer = ImageEnhance.Brightness(crop)
        output_image = enhancer.enhance(rate)
    elif enhance_type == 'Blurring':
        img = cv2.cvtColor(rgb,1)
        output_image = cv2.GaussianBlur(img,(11,11),rate)
    elif enhance_type == 'Cannize':
        output_image = cannize_image(crop)
    elif enhance_type == "Remove_noise":
        output_image = remove_noise(rgb)
    elif enhance_type == "Thresholding":
        output_image = thresholding(gray, rate)
    elif enhance_type == "Dilate":
        output_image = dilate(rgb)
    elif enhance_type == "Opening":
        output_image = opening(rgb)
    elif enhance_type == "Erode":
        output_image = erode(rgb)
    elif enhance_type == "Deskew":
        output_image = deskew(np.array(gray))
    elif enhance_type == "Custom":
        # resized = cv2.resize(gray, interpolation=cv2.INTER_CUBIC)
        # The single slider value doubles as denoise strength and threshold.
        dn_gray = cv2.fastNlMeansDenoising(gray, templateWindowSize=7, h=rate)
        gray_bin = thresholding(dn_gray, rate)
        output_image = gray_bin

    crop_display.image(output_image, width = crop_size, caption = enhance_type)
    return output_image
[ "udaylunawat@gmail.com" ]
udaylunawat@gmail.com
531c6a581cba14a68fb47c94fa6e897915406f86
cb802b802e26d037c93f38f74a4b8cc4931b8e64
/cart/admin.py
ecd02cb09153465d65bad46f56c01b014f9f9376
[]
no_license
aadhil06/ecommerce
c3e2acc55c2808fc9daecd2ed91b79aa46c60455
52707eb7b1b205d4d3a6b7b336164736a008f65c
refs/heads/master
2023-07-02T04:38:21.262112
2021-08-11T08:42:06
2021-08-11T08:42:06
394,891,300
0
0
null
null
null
null
UTF-8
Python
false
false
140
py
from django.contrib import admin

# Import the two models explicitly instead of `from .models import *`:
# wildcard imports hide which names this module depends on.
from .models import cartlist, items

# Register the cart models so they are manageable in the Django admin site.
admin.site.register(cartlist)
admin.site.register(items)
[ "aadhilks007@gmail.com" ]
aadhilks007@gmail.com
43569dfdaa6bacdb0b558ab38c03f2020ef7bfec
8d1c7cc1e66c5ea717a876116424c4f1029fb21f
/pollster/polls/admin.py
8b4709b2ca2f3c8c407167e7228e11e74b80f168
[]
no_license
NickFoden/pollster
a5d58d78ae476f3576d54b384ac8900880ea1ee7
1cf327783af1bee4960c21c4728192ff3da26a6f
refs/heads/master
2020-07-25T14:24:31.083072
2019-09-13T18:20:11
2019-09-13T18:20:11
208,322,052
0
0
null
null
null
null
UTF-8
Python
false
false
611
py
from django.contrib import admin

from .models import Question, Choice

# Branding for the admin site's header, browser title, and index page.
admin.site.site_header = "Pollster Admin"
admin.site.site_title = "Pollster Admin Area"
admin.site.index_title = "Welcome to the Pollster admin area"


class ChoiceInline(admin.TabularInline):
    """Edit a question's choices inline on the question change page."""

    model = Choice
    extra = 3


class QuestionAdmin(admin.ModelAdmin):
    """Question admin: choices inlined, publication date in a collapsible section."""

    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('Date Information', {'fields': ['pub_date'], 'classes': ['collapse']}),
    ]
    inlines = [ChoiceInline]


admin.site.register(Question, QuestionAdmin)
[ "nickfoden@gmail.com" ]
nickfoden@gmail.com
5bef409a74c946f0c10245a3696fedcf2ea6717c
198e4e500f308978008374619cfd3b6d4e9ab3d9
/pes/migrations/0004_player_manager.py
b1beeeb4cf3c43e0a5a623da82fafad355a2e0c3
[]
no_license
UditChugh/pml
4ffec91759d0b0e1cb00ad9ef73ef1a5c2fa7931
d41cc8461876301e2bb25ea2870ca0bec39fcc4a
refs/heads/master
2023-08-03T20:54:47.940878
2021-09-16T12:16:13
2021-09-16T12:16:13
289,675,466
1
1
null
null
null
null
UTF-8
Python
false
false
402
py
# Generated by Django 3.1 on 2020-09-11 18:45 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('pes', '0003_player_total_goal_diff'), ] operations = [ migrations.AddField( model_name='player', name='manager', field=models.CharField(default='unknown', max_length=30), ), ]
[ "23udit@gmail.com" ]
23udit@gmail.com