| blob_id (string, 40) | directory_id (string, 40) | path (string, 2–616) | content_id (string, 40) | detected_licenses (list, 0–69 items) | license_type (2 classes) | repo_name (string, 5–118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (246 classes) | content (string, 2–10.3M) | authors (list, 1 item) | author_id (string, 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2c615eeec86ee49817a3513724374a206511e132
|
060fbf2a69a90ad92de5fc877521d5ea6b298007
|
/test/vanilla/Expected/AcceptanceTests/BodyComplex/bodycomplex/models/double_wrapper.py
|
598e2b460d799b8c9b576803570caa93bfc99961
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
iscai-msft/autorest.python
|
db47a8f00253148fbc327fe0ae1b0f7921b397c6
|
a9f38dd762fbc046ce6197bfabea2f56045d2957
|
refs/heads/master
| 2021-08-02T13:06:34.768117
| 2018-11-21T00:29:31
| 2018-11-21T00:29:31
| 161,554,205
| 0
| 0
|
MIT
| 2018-12-12T22:42:14
| 2018-12-12T22:42:14
| null |
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DoubleWrapper(Model):
"""DoubleWrapper.
:param field1:
:type field1: float
:param
field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose:
:type
field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose:
float
"""
_attribute_map = {
'field1': {'key': 'field1', 'type': 'float'},
'field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose': {'key': 'field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose', 'type': 'float'},
}
def __init__(self, **kwargs):
super(DoubleWrapper, self).__init__(**kwargs)
self.field1 = kwargs.get('field1', None)
self.field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose = kwargs.get('field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose', None)
|
[
"noreply@github.com"
] |
iscai-msft.noreply@github.com
|
c95ffcfc0844e8799a1716f4dee8de9d4fb5917b
|
eb2c58eaa3d9e44cbba6e67c4c54827a0c00347e
|
/Cosmo/mcmc_chain/write_para_table.py
|
bb75fa2dc606c4b9bd28f3e13f338572dc49dbbb
|
[
"MIT"
] |
permissive
|
lshuns/CosmicShearRB
|
3f6b608d5db77aec24ba1d78a953b5b97d539ce9
|
84d682fc09dc8be0e12b82894cfb2c2c272b616b
|
refs/heads/master
| 2022-12-28T02:33:05.649083
| 2020-10-14T12:30:32
| 2020-10-14T12:30:32
| 218,310,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,067
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 11:19:58 2017
@author: fkoehlin
@modified: Shun-Sheng Li
"""
import os
import sys
import glob
import numpy as np
# Bayesian way of defining confidence intervals:
# What's the difference to percentiles?
def minimum_credible_intervals(values, central_value, weights, bins=40):
"""
Extract minimum credible intervals (method from Jan Haman) FIXME
copy & paste from Monte Python (2.1.2) with own modifications
--> checked that this function returns same output as Monte Python; modifications are all okay!!!
"""
#histogram = info.hist
#bincenters = info.bincenters
#levels = info.levels
histogram, bin_edges = np.histogram(values, bins=bins, weights=weights, normed=False)
bincenters = 0.5*(bin_edges[1:]+bin_edges[:-1])
# Defining the sigma contours (1, 2 and 3-sigma)
levels = np.array([68.27, 95.45, 99.73])/100.
bounds = np.zeros((len(levels), 2))
j = 0
delta = bincenters[1]-bincenters[0]
    # clamp the extrapolated edge bins at zero with the builtin max
    # (np.max's second positional argument is an axis, not a lower bound)
    left_edge = max(int(histogram[0] - 0.5*(histogram[1]-histogram[0])), 0)
    right_edge = max(int(histogram[-1] + 0.5*(histogram[-1]-histogram[-2])), 0)
failed = False
for level in levels:
norm = float(
(np.sum(histogram)-0.5*(histogram[0]+histogram[-1]))*delta)
norm += 0.25*(left_edge+histogram[0])*delta
norm += 0.25*(right_edge+histogram[-1])*delta
water_level_up = np.max(histogram)*1.0
water_level_down = np.min(histogram)*1.0
top = 0.
iterations = 0
while (abs((top/norm)-level) > 0.0001) and not failed:
top = 0.
water_level = (water_level_up + water_level_down)/2.
#ontop = [elem for elem in histogram if elem > water_level]
indices = [i for i in range(len(histogram))
if histogram[i] > water_level]
# check for multimodal posteriors
'''
if ((indices[-1]-indices[0]+1) != len(indices)):
print('Could not derive minimum credible intervals for this multimodal posterior!')
failed = True
break
'''
top = (np.sum(histogram[indices]) -
0.5*(histogram[indices[0]]+histogram[indices[-1]]))*(delta)
# left
if indices[0] > 0:
top += (0.5*(water_level+histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-histogram[indices[0]-1]))
else:
if (left_edge > water_level):
top += 0.25*(left_edge+histogram[indices[0]])*delta
else:
top += (0.25*(water_level + histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-left_edge))
# right
if indices[-1] < (len(histogram)-1):
top += (0.5*(water_level + histogram[indices[-1]]) *
delta*(histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-histogram[indices[-1]+1]))
else:
if (right_edge > water_level):
top += 0.25*(right_edge+histogram[indices[-1]])*delta
else:
top += (0.25*(water_level + histogram[indices[-1]]) *
delta * (histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-right_edge))
if top/norm >= level:
water_level_down = water_level
else:
water_level_up = water_level
# safeguard, just in case
iterations += 1
if (iterations > 1e4):
print('The loop to check for sigma deviations was taking too long to converge.')
break
# min
if indices[0] > 0:
bounds[j][0] = bincenters[indices[0]] - delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-histogram[indices[0]-1])
else:
if (left_edge > water_level):
bounds[j][0] = bincenters[0]-0.5*delta
else:
bounds[j][0] = bincenters[indices[0]] - 0.5*delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-left_edge)
# max
if indices[-1] < (len(histogram)-1):
bounds[j][1] = bincenters[indices[-1]] + delta*(histogram[indices[-1]]-water_level)/(histogram[indices[-1]]-histogram[indices[-1]+1])
else:
if (right_edge > water_level):
bounds[j][1] = bincenters[-1]+0.5*delta
else:
bounds[j][1] = bincenters[indices[-1]] + \
0.5*delta*(histogram[indices[-1]]-water_level) / \
(histogram[indices[-1]]-right_edge)
j += 1
for elem in bounds:
for j in (0, 1):
elem[j] -= central_value
return bounds
def weighted_mean(values, weights=None):
if weights is None:
weights = np.ones_like(values)
return np.sum(weights*values)/np.sum(weights)
def quantile(x, q, weights=None):
"""
Like numpy.percentile, but:
* Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
* scalar q not supported (q must be iterable)
* optional weights on x
"""
if weights is None:
return np.percentile(x, [100. * qi for qi in q])
else:
idx = np.argsort(x)
xsorted = x[idx]
cdf = np.add.accumulate(weights[idx])
cdf /= cdf[-1]
return np.interp(q, cdf, xsorted).tolist()
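# Illustrative call (hypothetical values): quantile(np.array([1., 2., 3., 4.]),
# [0.5], weights=np.ones(4)) interpolates the weighted median from the
# normalized CDF of the sorted sample.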
def get_values_and_intervals(parameters, weights, use_median=False):
param_values = np.zeros((len(parameters), 7))
confidence_values = np.zeros((len(parameters), 6))
for idx, param in enumerate(parameters):
if use_median:
central_value = quantile(param, [0.5], weights=weights)[0]
else:
central_value = weighted_mean(param, weights=weights)
# bounds returns [[-1sigma, +1sigma],[-2sigma, +2sigma], [-3sigma, +3sigma]]
bounds = minimum_credible_intervals(param, central_value, weights, bins=50)
param_values[idx, :] = np.concatenate(([central_value], bounds[:,0], bounds[:,1]))
confidence_values[idx, :] = central_value + bounds.flatten()
return param_values, confidence_values
def write_parameters_to_file(fname, best_fit_params, fit_statistics, param_values_mean, confidence_values_mean, param_values_median, confidence_values_median, labels, labels_tex):
with open(fname, 'w') as f:
f.write('# Best fitting values: \n')
f.write('\chi^2 = {:.4f}, \chi^2_red = {:.4f} ({:} d.o.f.), index in chain = {:.0f} \n'.format(fit_statistics[0], fit_statistics[1], int(fit_statistics[2]), fit_statistics[3]))
for index, label in enumerate(labels):
name = label +':'
f.write(name.ljust(20, ' ')+'{:.4f} \n'.format(best_fit_params[index]))
### (weighted) MEAN ###
f.write('\n'+'# parameter, MEAN, err_minus (68%), err_plus (68%), MEAN, err_minus (95%), err_plus (95%), MEAN, err_minus (99%), err_plus (99%) \n')
for index, label in enumerate(labels):
name = label +':'
f.write(name.ljust(20, ' ') + '{0:.4f} {1:.4f} +{2:.4f}, {0:.4f} {3:.4f} +{4:.4f}, {0:.4f} {5:.4f} +{6:.4f} \n'.format(param_values_mean[index, 0], param_values_mean[index, 1], param_values_mean[index, 4], param_values_mean[index, 2], param_values_mean[index, 5], param_values_mean[index, 3], param_values_mean[index, 6]))
f.write('\n'+'# parameter, lower bound (68%), upper bound (68%), lower bound (95%), upper bound (95%), lower bound (99%), upper bound (99%) \n')
for index, label in enumerate(labels):
name = label +':'
f.write(name.ljust(20, ' ')+'1sigma >{:.4f}, 1sigma <{:.4f}, 2sigma >{:.4f}, 2sigma <{:.4f}, 3sigma >{:.4f}, 3sigma <{:.4f} \n'.format(confidence_values_mean[index, 0], confidence_values_mean[index, 1], confidence_values_mean[index, 2], confidence_values_mean[index, 3], confidence_values_mean[index, 4], confidence_values_mean[index, 5]))
### (weighted) MEDIAN ###
f.write('\n'+'# parameter, MEDIAN, err_minus (68%), err_plus (68%), MEDIAN, err_minus (95%), err_plus (95%), MEDIAN, err_minus (99%), err_plus (99%) \n')
for index, label in enumerate(labels):
name = label +':'
f.write(name.ljust(20, ' ') + '{0:.4f} {1:.4f} +{2:.4f}, {0:.4f} {3:.4f} +{4:.4f}, {0:.4f} {5:.4f} +{6:.4f} \n'.format(param_values_median[index, 0], param_values_median[index, 1], param_values_median[index, 4], param_values_median[index, 2], param_values_median[index, 5], param_values_median[index, 3], param_values_median[index, 6]))
f.write('\n'+'# parameter, lower bound (68%), upper bound (68%), lower bound (95%), upper bound (95%), lower bound (99%), upper bound (99%) \n')
for index, label in enumerate(labels):
name = label +':'
f.write(name.ljust(20, ' ')+'1sigma >{:.4f}, 1sigma <{:.4f}, 2sigma >{:.4f}, 2sigma <{:.4f}, 3sigma >{:.4f}, 3sigma <{:.4f} \n'.format(confidence_values_median[index, 0], confidence_values_median[index, 1], confidence_values_median[index, 2], confidence_values_median[index, 3], confidence_values_median[index, 4], confidence_values_median[index, 5]))
### (weighted) MEAN (TeX) ###
f.write('\n'+'\n'+'\n'+'### TeX ###'+'\n'+'# parameter, MEAN, err_minus (68%), err_plus (68%), MEAN, err_minus (95%), err_plus (95%), MEAN, err_minus (99%), err_plus (99%) \n')
for index, label in enumerate(labels_tex):
name = label +':'
f.write(name.ljust(20, ' ')+'{0:.3f}_{{{1:.3f}}}^{{+{2:.3f}}}, {0:.3f}_{{{3:.3f}}}^{{+{4:.3f}}}, {0:.3f}_{{{5:.3f}}}^{{+{6:.3f}}} \n'.format(param_values_mean[index, 0], param_values_mean[index, 1], param_values_mean[index, 4], param_values_mean[index, 2], param_values_mean[index, 5], param_values_mean[index, 3], param_values_mean[index, 6]))
### (weighted) MEDIAN (TeX) ###
f.write('\n'+'\n'+'\n'+'### TeX ###'+'\n'+'# parameter, MEDIAN, err_minus (68%), err_plus (68%), MEDIAN, err_minus (95%), err_plus (95%), MEDIAN, err_minus (99%), err_plus (99%) \n')
for index, label in enumerate(labels_tex):
name = label +':'
f.write(name.ljust(20, ' ')+'{0:.3f}_{{{1:.3f}}}^{{+{2:.3f}}}, {0:.3f}_{{{3:.3f}}}^{{+{4:.3f}}}, {0:.3f}_{{{5:.3f}}}^{{+{6:.3f}}} \n'.format(param_values_median[index, 0], param_values_median[index, 1], param_values_median[index, 4], param_values_median[index, 2], param_values_median[index, 5], param_values_median[index, 3], param_values_median[index, 6]))
print 'File saved to: \n', fname
return
if __name__ == '__main__':
# path_to_chain = './KV450_H0_Dz_IA/'
path_to_chain = sys.argv[1]
fname = glob.glob(path_to_chain + '*.txt')[0]
data = np.loadtxt(fname)
weights = data[:, 0]
#print data
#print data[:, -1]
# glob can expand names with *-operator!
fname = glob.glob(path_to_chain + '*.paramnames')[0]
print fname
names = np.loadtxt(fname, dtype=str, delimiter='\t')
print names, names.shape
labels = names[:, 0]
labels_tex = names[:, 1]
chi2 = 2. * data[:, 1]
min_chi2 = chi2.min()
best_fit_index = np.where(data[:, 1] == data[:, 1].min())
print best_fit_index
best_fit_params = data[best_fit_index]
fit_statistics = np.array([min_chi2, 0., 0., int(best_fit_index[0])])
params_mean, conf_mean = get_values_and_intervals(data[:, 2:].T, weights, use_median=False)
params_median, conf_median = get_values_and_intervals(data[:, 2:].T, weights, use_median=True)
fname = os.path.join(path_to_chain, 'parameter_table.txt')
write_parameters_to_file(fname, best_fit_params[0, 2:], fit_statistics, params_mean, conf_mean, params_median, conf_median, labels, labels_tex)
|
[
"1006682997@qq.com"
] |
1006682997@qq.com
|
3034ae32e6ed518b69f539e10f4cf4208942ce29
|
34b8df07503d669a8123dc9f17bd2e684cc5c299
|
/scripts/ObjectScattering.py
|
3ec042d5bb6933d0d261712e93cf17922bbd96a0
|
[
"MIT"
] |
permissive
|
danOrzc/AssetGallery
|
e2c91485983a7bf6193d3fa2e22e3d4b7afdb618
|
adc15becd681399c22f7722ec860fc3f4cbebe98
|
refs/heads/master
| 2022-12-01T20:12:37.383482
| 2020-08-14T20:56:45
| 2020-08-14T20:56:45
| 284,359,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,982
|
py
|
"""Object Scattering.
This module contains generators that create random position with some given parameters.
"""
from maya import cmds
import maya.api.OpenMaya as om
import random
import math
def scatterOnCurve(curve, objectCount=5, **kwargs):
"""This function creates random positions along a given curve.
Attributes:
curve (str): The curve to spawn objects on.
objectCount (int): The number of copies to spawn.
**kwargs: Arbitrary keyword arguments.
Yields:
list: x, y and z position on the curve.
"""
# Rebuild the curve to have equal spacing
rebuiltCurve = cmds.rebuildCurve(curve, degree=1, spans=objectCount-1, end=1, replaceOriginal=False, constructionHistory=False, name="BuildingPathRebuilt")[0]
cvList = cmds.ls("{}.cv[*]".format(rebuiltCurve), flatten=True)
# Get each cv's position
for cv in cvList:
cvPosition = cmds.pointPosition(cv, world=True)
yield cvPosition
# Delete the rebuilt curve
cmds.delete(rebuiltCurve)
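# Minimal usage sketch (assumes a curve named "curve1" exists in the scene):
#   for position in scatterOnCurve("curve1", objectCount=10):
#       cmds.spaceLocator(position=position)
# Note: the rebuilt helper curve is deleted only after the generator is exhausted.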
def scatterOnRange(objectCount, minX, minY, minZ, maxX, maxY, maxZ, **kwargs):
"""This function creates random positions along a given curve.
Attributes:
objectCount (int): The number of copies to spawn.
minX (int): The minimum X coordicate.
minY (int): The minimum Y coordicate.
minZ (int): The minimum Z coordicate.
maxX (int): The maximum X coordicate.
maxY (int): The maximum Y coordicate.
maxZ (int): The maximum Z coordicate.
**kwargs: Arbitrary keyword arguments.
Yields:
list: A random x, y and z position
"""
# Generate random positions for each object
for index in xrange(objectCount):
randomX = random.uniform(minX, maxX)
randomY = random.uniform(minY, maxY)
randomZ = random.uniform(minZ, maxZ)
yield (randomX, randomY, randomZ)
def scatterOnMesh(objectCount, mesh, **kwargs):
"""This function creates random positions along a mesh' surface.
Attributes:
objectCount (int): The number of copies to spawn.
mesh (str): The name of the mesh to spawn objects on.
**kwargs: Arbitrary keyword arguments.
Yields:
list: A random location (x, y and z) on the surface of the mesh, and a rotation in Euler angles.
list[0], list[1] and list[2] Are X, Y and Z coordinates.
list[3] is x, y and z rotation as Euler angles to align object to normal
"""
# Generate on each object
for i in xrange(objectCount):
position = get3DPosition(mesh)
# Rotate upvector towards normal
rotateQuat = om.MQuaternion(om.MVector(0, 1, 0), om.MVector(position[3]))
# Get rotation in Euler
rotateEuler = rotateQuat.asEulerRotation()
rotation = [math.degrees(i) for i in rotateEuler]
position[3] = rotation
yield position
def get3DPosition(mesh):
"""This function gets a random 3D position on a mesh's surface.
Attributes:
mesh (str): The name of the mesh to get the point on.
Returns:
list: x, y, z, and Euler angles of the point.
"""
# Generate random uv coordinate to use with getPointAtUV
randomU = random.uniform(0,1)
randomV = random.uniform(0,1)
position = getPointAtUV(mesh, randomU, randomV)
# If the position is None, generate another
if not position:
position = get3DPosition(mesh)
return position
def getPointAtUV(mesh, U, V):
"""This function calculates the 3D coordinates based on UV coordinates.
Attributes:
mesh (str): The name of the mesh to get the point on.
U (float): the u coordinate on uv
V (float): the v coordinate on uv
Returns:
list: x, y, z, and Euler angles of the point.
"""
# Create a Selection list (its like a wrapper class for a maya list)
selList = om.MSelectionList()
# Add the mesh to the list
selList.add(mesh)
# Get MDagPath object based on the mesh
dagPath = selList.getDagPath(0)
# Check if selected object is a mesh
if dagPath.hasFn( om.MFn.kMesh ):
# Get Maya Mesh object based on dagPath
mesh = om.MFnMesh(dagPath)
# Get uv sets
uvset = mesh.getUVSetNames()
# Get each face
for face in range(mesh.numPolygons):
# Try to look for the uv point in the given face
try:
# Get uv points on kWorld space
point = mesh.getPointAtUV(face,U,V,space=om.MSpace.kWorld,uvSet=uvset[0],tolerance=0.0)
# Get face normal
normal = mesh.getPolygonNormal(face)
# Return the position and the face normal
result = list(point)[:3] + [list(normal)]
return result
            # If the point was not found, an exception is raised
except:
pass
return None
|
[
"dan.orzc@gmail.com"
] |
dan.orzc@gmail.com
|
a3f8fc1ff5505eba4bd7a0d5edaf2e5a2abc62f0
|
81e7aaee6271d67f9f73884d9b234548e752ac13
|
/checkout/views.py
|
d3c4bbe4088f5906d2b83bfbb3d763de3ebbd2a3
|
[] |
no_license
|
steff880/ci-boutique_ado_v1
|
9fba7c5c3c1ace301c60c8919cb2a94f6d5c33fa
|
0a94c19ded30fe2c6bbe98eee8d00bbc9d8c22b2
|
refs/heads/main
| 2023-08-02T12:11:47.378414
| 2021-10-10T16:47:21
| 2021-10-10T16:47:21
| 399,452,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,007
|
py
|
from django.shortcuts import render, redirect, reverse, get_object_or_404, HttpResponse
from django.views.decorators.http import require_POST
from django.contrib import messages
from django.conf import settings
from .forms import OrderForm
from .models import Order, OrderLineItem
from products.models import Product
from profiles.models import UserProfile
from profiles.forms import UserProfileForm
from bag.contexts import bag_contents
import stripe
import json
# Create your views here.
@require_POST
def cache_checkout_data(request):
try:
pid = request.POST.get('client_secret').split('_secret')[0]
print(pid)
stripe.api_key = settings.STRIPE_SECRET_KEY
stripe.PaymentIntent.modify(pid, metadata={
'bag': json.dumps(request.session.get('bag', {})),
'save_info': request.POST.get('save_info'),
'username': request.user,
})
return HttpResponse(status=200)
except Exception as e:
messages.error(request, 'Sorry, your payment cannot be \
processed right now. Please try again later.')
return HttpResponse(content=e, status=400)
def checkout(request):
stripe_public_key = settings.STRIPE_PUBLIC_KEY
stripe_secret_key = settings.STRIPE_SECRET_KEY
if request.method == 'POST':
bag = request.session.get('bag', {})
form_data = {
'full_name': request.POST['full_name'],
'email': request.POST['email'],
'phone_number': request.POST['phone_number'],
'country': request.POST['country'],
'postcode': request.POST['postcode'],
'town_or_city': request.POST['town_or_city'],
'street_address1': request.POST['street_address1'],
'street_address2': request.POST['street_address2'],
'county': request.POST['county'],
}
order_form = OrderForm(form_data)
if order_form.is_valid():
order = order_form.save(commit=False)
pid = request.POST.get('client_secret').split('_secret')[0]
order.stripe_pid = pid
order.original_bag = json.dumps(bag)
order.save()
for item_id, item_data in bag.items():
try:
product = Product.objects.get(id=item_id)
if isinstance(item_data, int):
order_line_item = OrderLineItem(
order=order,
product=product,
quantity=item_data,
)
order_line_item.save()
else:
for size, quantity in item_data['items_by_size'].items():
order_line_item = OrderLineItem(
order=order,
product=product,
quantity=quantity,
product_size=size,
)
order_line_item.save()
except Product.DoesNotExist:
messages.error(request, (
"One of the products in your bag wasn't found in our database. "
"Please call us for assistance!")
)
order.delete()
return redirect(reverse('view_bag'))
# Save the info to the user's profile if all is well
request.session['save_info'] = 'save-info' in request.POST
return redirect(reverse('checkout_success', args=[order.order_number]))
else:
messages.error(request, 'There was an error with your form. \
Please double check your information.')
else:
bag = request.session.get('bag', {})
if not bag:
messages.error(request, "There's nothing in your bag at the moment")
return redirect(reverse('products'))
current_bag = bag_contents(request)
total = current_bag['grand_total']
stripe_total = round(total * 100)
stripe.api_key = stripe_secret_key
intent = stripe.PaymentIntent.create(
amount=stripe_total,
currency=settings.STRIPE_CURRENCY
)
if request.user.is_authenticated:
try:
profile = UserProfile.objects.get(user=request.user)
order_form = OrderForm(initial={
'full_name': profile.user.get_full_name(),
'email': profile.user.email,
'phone_number': profile.default_phone_number,
'country': profile.default_country,
'postcode': profile.default_postcode,
'town_or_city': profile.default_town_or_city,
'street_address1': profile.default_street_address1,
'street_address2': profile.default_street_address2,
'county': profile.default_county,
})
except UserProfile.DoesNotExist:
order_form = OrderForm()
else:
order_form = OrderForm()
if not stripe_public_key:
messages.warning(request, 'Stripe public key is missing. \
Did you forget to set it in your environment?')
template = 'checkout/checkout.html'
context = {
'order_form': order_form,
'stripe_public_key': stripe_public_key,
'client_secret': intent.client_secret,
}
return render(request, template, context)
def checkout_success(request, order_number):
"""
Handle successful checkouts
"""
save_info = request.session.get('save_info')
order = get_object_or_404(Order, order_number=order_number)
if request.user.is_authenticated:
profile = UserProfile.objects.get(user=request.user)
# Attach the user's profile to the order
order.user_profile = profile
order.save()
# Save the user's info
if save_info:
profile_data = {
'default_phone_number': order.phone_number,
'default_country': order.country,
'default_postcode': order.postcode,
'default_town_or_city': order.town_or_city,
'default_street_address1': order.street_address1,
'default_street_address2': order.street_address2,
'default_county': order.county,
}
user_profile_form = UserProfileForm(profile_data, instance=profile)
if user_profile_form.is_valid():
user_profile_form.save()
    messages.success(request, f'Order successfully processed! \
        Your order number is {order_number}. A confirmation \
        email will be sent to {order.email}.')
if 'bag' in request.session:
del request.session['bag']
template = 'checkout/checkout_success.html'
context = {
'order': order,
}
return render(request, template, context)
|
[
"stefan.d.yordanov@gmail.com"
] |
stefan.d.yordanov@gmail.com
|
97c8aa650495f09762264d3246a1009bddb7cccd
|
f741f7f070d150cffbb63f13666fec5dceb4c7c4
|
/Графы/Представление графов.py
|
e97690fd502801d566f1e6957ab68b655d2005ee
|
[] |
no_license
|
mahhets/algorithms-and-data-structures
|
7e2359c15a1cfd46200f9f2008a3a4050e2a5f40
|
d1d198020e9d1f7f5085188678828520e4da36c8
|
refs/heads/main
| 2023-05-29T02:38:05.716730
| 2021-06-10T20:26:34
| 2021-06-10T20:26:34
| 367,147,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,188
|
py
|
"""
1. Матрица смежности
2. Списки смежности
3. Списки ребер
"""
# 1.1 Представление простого графа в виде матрицы смежгости
"""
Вершина графа 0 соеднинена с 1 и 2
Вершина графа 1 соединена с 1, 2, 3
Вершина 2 соединена с 0, 1
Вершина 3 соединена с 1
"""
graph = [
    [0,1,1,0], # 0
    [1,0,1,1], # 1
    [1,1,0,0], # 2
    [0,1,0,0]  # 3
]
print(*graph, sep='\n')
# 1.2 Directed graph
"""
A non-zero value means there is an edge pointing from one vertex to another.
For example, vertex 1 is no longer connected to 0, while 0 is still connected to 1,
since the directed edge goes 0 -> 1.
If the edges have weights, the ones should be replaced with the weight values.
Storing a graph in an adjacency matrix costs W**2 memory, where W is the number of vertices.
"""
graph = [
[0,1,1,0], # 0
[0,0,1,1], # 1
[0,1,0,0], # 2
[0,0,0,0] # 3
]
print(*graph, sep='\n')
print('*'*50)
graph[0][1:3] = [2,3]
graph[1][2] = 2
graph[2][1] = 2
print(*graph, sep='\n')
print('-'*50)
# 2. Adjacency lists
"""
A list is created for each vertex
The list stores the vertices adjacent to it
"""
print('Adjacency lists')
graph = []
graph.append([1,2])
graph.append([0,2,3])
graph.append([0,1])
graph.append([1])
print(*graph, sep='\n')
# Graph based on a dict and sets
print('Graph based on a dict and sets')
graph_2 = {
0:{1,2},
1:{0,2,3},
2:{0,1},
3:{1},
}
print(graph_2)
# Check that vertex 3 is reachable from vertex 1
if 3 in graph_2[1]:
    print('Vertex 3 is reachable from vertex 1')
# Weighted graph as adjacency lists
print('Weighted graph as adjacency lists')
from collections import namedtuple
Vertex = namedtuple('Vertex',['vertex','edge'])
graph_3 = []
graph_3.append([Vertex(1,2),Vertex(2,3)]) # edge 0 -> 1 costs 2 units, edge 0 -> 2 costs 3 units
graph_3.append([Vertex(0,2), Vertex(2,2), Vertex(3,1)])
graph_3.append([Vertex(0,3), Vertex(1,2)])
graph_3.append([Vertex(1,1)])
print(*graph_3, sep='\n')
# Check that vertex 3 is reachable from vertex 1
for v in graph_3[1]:
    # If some element v of row 1 has 3 as its vertex, the path exists
    if v.vertex == 3:
        print('it is possible')
# Storing graphs as a class
class Graph:
    def __init__(self, vertex, edge, spam):
        self.vertex = vertex
        self.edge = edge
        self.spam = spam # extra information about the vertices (if needed)
# Next, methods for working with the class would need to be added
# 3. Edge list
"""
This approach stores pairs of values:
(the vertex the edge leaves, the vertex it enters)
If the graph is weighted, it is natural to add a third element, the edge weight:
(1,2,3) means from vertex 1 to vertex 2 with weight 3
"""
print('Edge list')
graph = [(0,1),(0,2),(1,2),(2,1),(1,3)]
print(*graph, sep='\n')
|
[
"the_mahh@mail.ru"
] |
the_mahh@mail.ru
|
b2d50324d2c956e535fc4e32778a1f731c8195bf
|
47b7a1a07f75667dc8a36b5f71b9aa8b8289e8f2
|
/model/Bayes/bayes_son/util/count_recai_percent.py
|
d53f24d4ec37d0b340ce603711ee022c96ccc412
|
[] |
no_license
|
javelir/SearchEngine
|
dc142cffd822c7aafbdf2b2e107c0cf34cf98c0b
|
69ed9cdde4f29fb873730fc5ea29cfb4c574ea05
|
refs/heads/master
| 2020-07-25T19:01:56.985217
| 2016-05-27T09:29:49
| 2016-05-27T09:29:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
#coding=utf-8
from __future__ import division
import os
import sys
import re
reload(sys)
sys.setdefaultencoding('utf-8')
os.chdir('../data')
words = []
dictory = {}
word_set = []
new_dict = {}
file_read = open('split_recai.txt', 'r')
file_write = open('percent_recai.txt', 'w+')
for line in file_read.readlines():
line = line.strip().split(' ')
for word in line:
words.append(word)
word_set = set(words) # exclude these repeated keyWords
length = len(words)
print length
for item in word_set :
dictory[item] = words.count(item) / length
new_dict = sorted( dictory.iteritems(), key = lambda d:d[1], reverse = True ) # sort by value
for key,value in new_dict:
file_write.write( key + ":" + str(value) )
file_write.write( '\n' )
|
[
"studywiller@gmail.com"
] |
studywiller@gmail.com
|
797e2d7a43e4b15dea8e59a7e042f26e1eb14caf
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/util/bin/format/pdb2/pdbreader/symbol/LocalData32MsSymbol.pyi
|
aede84acc0cf9dc7b6e4bd9ac3bccadd11448bf2
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
pyi
|
import ghidra.app.util.bin.format.pdb2.pdbreader
import ghidra.app.util.bin.format.pdb2.pdbreader.symbol
import java.lang
class LocalData32MsSymbol(ghidra.app.util.bin.format.pdb2.pdbreader.symbol.AbstractLocalDataMsSymbol):
PDB_ID: int = 4364
def __init__(self, __a0: ghidra.app.util.bin.format.pdb2.pdbreader.AbstractPdb, __a1: ghidra.app.util.bin.format.pdb2.pdbreader.PdbByteReader): ...
def emit(self, __a0: java.lang.StringBuilder) -> None: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getName(self) -> unicode: ...
def getOffset(self) -> long: ...
def getPdbId(self) -> int: ...
def getSegment(self) -> int: ...
def getTypeRecordNumber(self) -> ghidra.app.util.bin.format.pdb2.pdbreader.RecordNumber: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def pdbId(self) -> int: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
8edaaeaff5761d5891eaa8985ac25aacf93ad364
|
5f5d6c8b2470f483033d8b62ac8ab97236b7245b
|
/migrations/versions/3f2546f228d8_.py
|
bef59fb45c9031714eefef9bde800d8c075ba990
|
[] |
no_license
|
da-peng/ApiTestWeb
|
534d15859df14c87f4ae5e067887e79e3e2324c9
|
7cd61e06c4c2e7a5e0237451d831a58c825cffc2
|
refs/heads/master
| 2020-03-12T14:04:24.083825
| 2018-01-11T13:04:35
| 2018-01-11T13:04:35
| 130,658,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
"""empty message
Revision ID: 3f2546f228d8
Revises: 5d1fb7a39527
Create Date: 2018-01-11 14:46:27.252525
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3f2546f228d8'
down_revision = '5d1fb7a39527'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('interface_detail', sa.Column('header', sa.String(length=255), nullable=True))
op.add_column('interface_detail', sa.Column('is_sign', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('interface_detail', 'is_sign')
op.drop_column('interface_detail', 'header')
# ### end Alembic commands ###
|
[
"xiaopeng.wu@qlchat.com"
] |
xiaopeng.wu@qlchat.com
|
b1dc65782f757d291f0b3c8796390124c41932ae
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_204/285.py
|
05697ee8e397c0a3ab1f3fbfedb01bf8d507a112
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,058
|
py
|
from __future__ import print_function, division
from math import ceil, floor
import numpy as np
# Fernando Gonzalez del Cueto. Code Jam 2017
#infile = 'test2.in'
infile = 'B-small-attempt2.in'
outfile = infile.replace('.in', '.out')
fid = open(infile, 'r')
n_cases = int(fid.readline().strip())
f_out = open(outfile, 'w')
def solver(rata_q, p):
assert isinstance(rata_q, np.ndarray)
assert isinstance(p, np.ndarray)
n_ingredients, n_packages = p.shape
taken = np.zeros_like(p, dtype=bool)
lb = int(floor(np.min(np.min(0.9*p / rata_q, axis=1))))
ub = int(ceil(np.max(np.max(1.1*p / rata_q, axis=1))))
kits = 0
for q in range(lb, ub+1):
if (p==0).all():
return kits
t = (p >= rata_q * (q * 0.9)) & (p <= rata_q * (q * 1.1))
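        # t[i, j] is True when package j of ingredient i could serve exactly q
        # portions of the recipe within the allowed 10% tolerance:
        # 0.9*q*r_i <= p[i, j] <= 1.1*q*r_i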
can_make = t.astype(np.uint8).sum(axis=1)
max_kits = can_make.min()
if max_kits.min() > 0:
kits += max_kits
if test_case==88:
pass
for row in range(p.shape[0]):
eliminated = 0
for col in range(p.shape[1]):
if t[row,col]:
p[row,col] = 0 # used, take them out
eliminated += 1
if eliminated >= max_kits:
break
return kits
for test_case in range(1,n_cases+1):
n_ingredients, n_packages = map(int, fid.readline().strip().split())
rata_q = map(int, fid.readline().strip().split())
r = np.array(rata_q).reshape((n_ingredients,1))
l = []
for i_ing in range(n_ingredients):
l.append(map(int, fid.readline().strip().split()))
a = np.array(l, dtype=np.float64)
print('Case %i' % test_case)
print(n_ingredients, n_packages)
print(rata_q)
print(a)
if test_case == 5:
pass
sol = solver(r, a)
print(sol)
l = 'Case #%i: %i\n' % (test_case, sol)
print(l)
f_out.write(l)
f_out.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
6ba7c74d1f0f583ca61f56bca220f835b3f31a46
|
964475a2cac8640b21d6826123ef3cf7f78d3933
|
/lca.py
|
19641e6cbfe07ddf0bb5255c1e9b939a1074f457
|
[] |
no_license
|
gxyd/competitive-programming
|
06183ac6fe17e2594cdad4bc05ded99500fa2416
|
7fc4b4569d21226fd51a85a1637e362339f9aed1
|
refs/heads/master
| 2021-01-01T19:57:29.087457
| 2017-08-15T19:08:38
| 2017-08-15T19:08:38
| 98,727,624
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
#!/usr/bin/python3
def lca(root, v1, v2):
    """Return the lowest common ancestor of values v1 and v2 in a binary search tree."""
    if root.data < v1 and root.data < v2:
        return lca(root.right, v1, v2)
    elif root.data > v1 and root.data > v2:
        return lca(root.left, v1, v2)
    return root
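# Minimal usage sketch (hypothetical Node class; lca() assumes a binary search tree):
#   class Node:
#       def __init__(self, data, left=None, right=None):
#           self.data, self.left, self.right = data, left, right
#   root = Node(4, Node(2, Node(1), Node(3)), Node(6, Node(5), Node(7)))
#   print(lca(root, 1, 3).data)  # -> 2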
|
[
"gauravdhingra.gxyd@gmail.com"
] |
gauravdhingra.gxyd@gmail.com
|
e45a0cbad78f8302bf4f4c0de2c53829f40b4317
|
db0790c6164a6248ca5d349e66c829ead9304255
|
/config.py
|
e03682a5a071d9349522d2afef17f26ab075cf50
|
[] |
no_license
|
HaToan/Flask---Large-Application-Structure
|
a7c003151c0dc816e6b9f6dcd860e0d6c9d6a1d0
|
1312cb5e7d773dd872a6ba74e397a349f9701472
|
refs/heads/master
| 2021-01-21T17:37:02.185389
| 2017-05-21T16:19:47
| 2017-05-21T16:19:47
| 91,964,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'secret_key'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
    FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
MAIL_SERVER = 'smtp.viettel.com.vn'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development' : DevelopmentConfig,
'testing' : TestingConfig,
'production' : ProductionConfig,
'default' : DevelopmentConfig,
}
|
[
"kma.toanfanta@gmail.com"
] |
kma.toanfanta@gmail.com
|
40f417419f22ec79ca360d5c42f175cfe7e49417
|
b856f8ace132d7145ea652b6790b17bbcd05b2e5
|
/lecture_notes/02 abstraction/guesser_inproved.py
|
1c6e158a5f6f9b85dc8af6aa22312f3703043d1a
|
[] |
no_license
|
hrystiaaaaaa/introduction-to-it
|
e2341a752d904f394b537609bd8a6136bb7d0d89
|
f67e8752115eed04cbaef38fe57835754cb2716c
|
refs/heads/main
| 2023-09-05T11:45:44.546346
| 2021-11-17T11:13:08
| 2021-11-17T11:13:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
import random
def get_int_input():
while True:
raw_input = input("Enter number: ")
if raw_input.isdigit():
return int(raw_input)
else:
print(f"Sorry, '{raw_input}' is not a number")
def congratulate_user():
print("!!!!!!!!!")
print("!You won!")
print("!!!!!!!!!")
def play_game():
number = random.randint(1, 10)
user_guessed = False
while not user_guessed:
guess = get_int_input()
if guess == number:
congratulate_user()
user_guessed = True
while True:
play_game()
want_more = input("Wanna play one more game?")
if want_more != "Yes":
quit()
|
[
"artem.korotenko@betterme.world"
] |
artem.korotenko@betterme.world
|
727064be03a7e86e4fa580c15109167102e3c132
|
8ce88df2976d07fd287d1c96771694f4f9fd6c9a
|
/exe089.py
|
67fd95b8afc652e4103a90715421919ff7b56c85
|
[] |
no_license
|
gabialeixo/python-exercises
|
9974b9b2e15f92c7f58fb32c0cc0abdd5015a34c
|
f62f7ba49eb77fc16a37058f8974153491b565bb
|
refs/heads/master
| 2023-01-24T09:23:12.387968
| 2020-12-07T17:31:07
| 2020-12-07T17:31:07
| 305,712,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
# Create a program that reads the name and two grades of several students and stores them in a composite list.
# At the end, show a report card with each student's average and let the user display each student's grades individually.
print('-' * 30)
print('{:^30}'.format(' COLÉGIO TUTTI FRUTTI '))
print('-' * 30)
dados = list()
while True:
    nome = str(input('Enter your name: '))
    nota01 = float(input('Grade 01: '))
    nota02 = float(input('Grade 02: '))
    media = (nota01 + nota02) / 2
    dados.append([nome, [nota01, nota02], media])
    opcao = ' '
    while opcao not in 'YN':
        opcao = str(input('Continue? [Y/N] ')).strip().upper()[0]
    if opcao == 'N':
        break
print('-' * 30)
print('{:^30}'.format(' REPORT CARD '))
print('-' * 30)
print('{:<10}{:^10}{:>10}'.format('No.','Name','Average'))
print('-' * 30)
for i, a in enumerate(dados):
    print(f'{i:<10}{a[0]:^10}{a[2]:>10.1f}')
while True:
    print('-' * 30)
    nota_geral = int(input('Show grades for which student? (999 to stop) '))
    if nota_geral == 999:
        break
    if nota_geral <= len(dados) - 1:
        print(f'Grades of student {dados[nota_geral][0]} are {dados[nota_geral][1]}')
print('{:=^40}'.format(' END OF PROGRAM '))
|
[
"gabealeixo13@gmail.com"
] |
gabealeixo13@gmail.com
|
52d202d6a42b8401a4d91e31858d0e44793d16a5
|
c25145f9dff4dce51d52ce7062909cb0b70dcf16
|
/akun/migrations/0001_initial.py
|
5148f2f04bbfdbe6bfdd1a13787a440637a7c0bf
|
[] |
no_license
|
munawariz/PIRUS
|
e79929a2b18ad9c4ea837c323bbf53337d6ea80d
|
7462501743cf8c859b63f1561291c674604b37dd
|
refs/heads/master
| 2022-12-26T13:18:08.893051
| 2020-09-09T08:18:37
| 2020-09-09T08:18:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,696
|
py
|
# Generated by Django 3.1.1 on 2020-09-07 05:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('rumahsakit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('role', models.CharField(choices=[('A', 'Admin'), ('D', 'Direktur RS')], max_length=1)),
('username', models.CharField(max_length=10, unique=True)),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
('last_login', models.DateTimeField(auto_now=True, verbose_name='last login')),
('is_admin', models.BooleanField(default=False)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='DirekturRS',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nama', models.CharField(blank=True, max_length=50, null=True)),
('email', models.CharField(max_length=50, null=True)),
('no_telp', models.CharField(max_length=13, unique=True)),
('rumahsakit', models.ManyToManyField(related_name='RS', to='rumahsakit.RumahSakit')),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='akun_direktur', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nama', models.CharField(blank=True, max_length=50, null=True)),
('email', models.CharField(max_length=50, null=True)),
('no_telp', models.CharField(max_length=13, unique=True)),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='akun_admin', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"ganiyamustaga32@gmail.com"
] |
ganiyamustaga32@gmail.com
|
5f3d27220c537b50b9c139c8dd47915eabbb5f9e
|
e997c571172eb7a3771699c9644abb5847f9d305
|
/BA2E.py
|
ae8dcff113f3b91db6533c22bde9cfc599a875c8
|
[] |
no_license
|
Mila2305/Bioinformatics-pmfst
|
cef3d5245457b2c630a9985726ae577441451a6a
|
27d6fe26b1faeadcda94ae21963ea7f6920eaeeb
|
refs/heads/main
| 2023-07-30T08:49:30.029768
| 2021-09-13T07:35:36
| 2021-09-13T07:35:36
| 352,158,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,739
|
py
|
def kmer(text, i, k):
"""substring of text from i-th position for the next k letters"""
return text[i:(i+k)]
def Lwindows(text,L):
"""list of all L-windows in text"""
windows=list()
for i in range (0,len(text)-L+1):
windows.append(kmer(text,i,L))
return windows
def probability(window,profile):
    # probability of a k-mer according to the profile matrix
    prob = 1
    for i in range(0, len(window)):
        if window[i] == 'A':
            prob = prob * float(profile[0][i])
        elif window[i] == 'C':
            prob = prob * float(profile[1][i])
        elif window[i] == 'G':
            prob = prob * float(profile[2][i])
        elif window[i] == 'T':
            prob = prob * float(profile[3][i])
    return prob
def mostProbkmerinText(text,k,profile):
d=dict()
for window in Lwindows(text,k):
d[window]=probability(window,profile)
return [x[0] for x in d.items() if x[1]==max(d.values())][0]
def count(motifs,nucl,i):
# compute count for each nucleotide of i-th column
col=[motif[i] for motif in motifs]
num=0
if nucl==0:
num=len([n for n in col if n=='A'])
if nucl==1:
num=len([n for n in col if n=='C'])
if nucl==2:
num=len([n for n in col if n=='G'])
if nucl==3:
num=len([n for n in col if n=='T'])
return num
def capitalLetter(motifs,i):
# find a capital letter of i-th column
counts=[count(motifs,nucl,i) for nucl in range (0,4)]
return [nucl for nucl in range (0,4) if counts[nucl]==max(counts)][0]
def score(motifs):
sc=0
for i in range(0,len(motifs[0])):
sc=sc+(len(motifs)-count(motifs,capitalLetter(motifs,i),i))
return sc
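# The pseudocounts below are Laplace smoothing: add 1 to every nucleotide count
# so no column probability is exactly zero; the denominator grows by 4 (one per
# nucleotide) accordingly.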
def profileMatrixWithPseudocounts(motifs,k):
matrix=[]
for i in range(0,4):
matrix.append(list())
for i in range(0,k):
for nucl in range(0,4):
matrix[nucl].append((count(motifs,nucl,i)+1) / (len(motifs)+4))
return matrix
def greedyMotifSearch(dna, k, t):
BestMotifs = [kmer(s,0,k) for s in dna]
for motif in Lwindows(dna[0],k):
motifs=list()
motifs.append(motif)
for i in range(1,t):
profile=profileMatrixWithPseudocounts(motifs,k)
motifs.append(mostProbkmerinText(dna[i],k,profile))
if score(motifs)<score(BestMotifs):
BestMotifs=motifs
return BestMotifs
x = '''3 5
GGCGTTCAGGCA
AAGAATCAGTCA
CAAGGAGTTCGC
CACGTCAATCAC
CAATAATATTCG'''
inlines=x.split()
k=int(inlines[0])
t=int(inlines[1])
dna=list()
for i in range(2,len(inlines)):
dna.append(inlines[i])
res=greedyMotifSearch(dna,k,t)
print("\n".join(res))
|
[
"noreply@github.com"
] |
Mila2305.noreply@github.com
|
96be516e779a46c62fb18e74280531fe7f60457d
|
74d31a65c825469d2d0a1d6e7604455ca2fbf0e2
|
/src/definitions.py
|
df356cc9b79737f2dde6c19a214bff0567286993
|
[
"MIT"
] |
permissive
|
AHCoder/galaxy-integration-ps2
|
d80116619abc33140d8af5c260ca61ba3d7c263c
|
2d57a094e4447a2016074a42dd4d7b425985a48a
|
refs/heads/master
| 2021-09-27T05:14:12.927921
| 2021-09-14T18:15:20
| 2021-09-14T18:15:20
| 198,490,246
| 49
| 9
|
MIT
| 2019-11-06T16:11:03
| 2019-07-23T18:50:57
|
Python
|
UTF-8
|
Python
| false
| false
| 313
|
py
|
from dataclasses import dataclass
@dataclass
class PS2Game():
""" PS2Game object.
:param id: unique identifier of the game, this will be passed as parameter for methods such as launch_game
:param name: name of the game
:param path: path to the rom
"""
id: str
name: str
path: str
|
[
"ajnol.hasanic@gmail.com"
] |
ajnol.hasanic@gmail.com
|
e3ba2f5353b2b4b4ee3bb26523007b1f86aebad1
|
31fc98d10f55fb54a79147c41b9895158a69700e
|
/matplotlib_file/die_visual.py
|
35caee26caa1d8c48eef8fd76b0bb36c87bee728
|
[] |
no_license
|
zhouyangok/pythonLearning
|
cae45691af248374e486bbc6919294de818da9ac
|
375106908446ae5d8ac22c98dd230f5b827aa122
|
refs/heads/master
| 2021-04-15T04:05:36.839519
| 2018-03-22T08:44:24
| 2018-03-22T08:44:24
| 126,304,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
from die import Die
import pygal
die_1 = Die()
die_2 = Die()
results = []
for roll_num in range(1000):
result = die_1.roll()+die_2.roll()
results.append(result)
#print(results)
# Analyze the results
frequencies = []
max_result = die_1.num_sides+die_2.num_sides
for value in range(1,max_result+1):
frequency = results.count(value)
frequencies.append(frequency)
print(frequencies)
# Visualize the results
hist = pygal.Bar()
hist.title = "Results of rolling two D6 dice 1000 times."
hist.x_labels = [str(v) for v in range(1, max_result+1)]
hist.x_title = "Result"
hist.y_title = "Frequency of Result"
hist.add('D6',frequencies)
hist.render_to_file('die_visual.svg')
|
[
"526731929@qq.com"
] |
526731929@qq.com
|
e01708431fd65fcc0a4127f59d368aa95e900742
|
8ef7a012b4be5a475fcaf1a67e18c92b61ad609a
|
/ProjectOpenCV/Proceso.py
|
e5c0f3f287368b06765480d13519a5e2a43082bc
|
[] |
no_license
|
QuantumCode2000/ProyectoFinalPythonOOP-PYDATA
|
8b212030ce44b4e0101b6b10b8edc887eec2f13d
|
1d6cc5a6fa5d5239975fcf2df2b6e3741cd1e7de
|
refs/heads/master
| 2022-07-17T02:49:32.733751
| 2020-05-13T03:11:10
| 2020-05-13T03:11:10
| 263,512,628
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
import cv2
class Proceso:
@staticmethod
def leido_de_iamgen(imagen_leida):
imagen = cv2.imread(imagen_leida)
return imagen
@staticmethod
def guardar_imagen(imagen, nombre_de_la_imagen_nueva):
if(nombre_de_la_imagen_nueva.isalnum()):
nombre_de_la_imagen = nombre_de_la_imagen_nueva + ".jpg"
cv2.imwrite(nombre_de_la_imagen, imagen)
else:
print(" ********no es un formato correcto para asignarle un nombre a la imagen********")
@staticmethod
def mostrar_imagen_en_ventana(nombre_de_la_ventana, imagen_a_mostrar):
cv2.namedWindow(nombre_de_la_ventana, cv2.WINDOW_NORMAL)
cv2.imshow(nombre_de_la_ventana, imagen_a_mostrar)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"64666591+Dozz2000@users.noreply.github.com"
] |
64666591+Dozz2000@users.noreply.github.com
|
f4de52fc72912e329b602c84aae12ef5fd2e76c4
|
946a9ccf4adb92c9c694c7c127a6c010a17248ad
|
/inventory/settings.py
|
3bb5451cdace3ccad977acde0f097c1d15cdbb00
|
[] |
no_license
|
king-ellie/django-inventory-app
|
7cf33e5a85482dd9f3218075b4233fa4cd9f421e
|
3be5a79b64c64ab6687837d7d31e249cf40456bd
|
refs/heads/main
| 2023-07-31T16:32:31.981220
| 2021-09-07T13:32:45
| 2021-09-07T13:32:45
| 403,991,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,409
|
py
|
"""
Django settings for inventory project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-!nisc=z8pdvdam@fqhig0v8xkoi(qe=un-%j!k)yy%@(pd*&nk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ownership.apps.OwnershipConfig',
'debug_toolbar'
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware'
]
INTERNAL_IPS = [
'127.0.0.1'
]
ROOT_URLCONF = 'inventory.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'inventory.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"61605543+king-ellie@users.noreply.github.com"
] |
61605543+king-ellie@users.noreply.github.com
|
f834fcad5971ebffddb726d554e46f5cace2caa6
|
c88795933123416f6ccf8b063454af70ae47ec23
|
/manage.py
|
284383db775fd893ec9920460741c0d71c000076
|
[] |
no_license
|
Raefat/5dolar-website-meme
|
77159daa275764c2cdb2682d573c7df5671a4759
|
6c219d0d639516de77b117bc4a2e244d60814fe0
|
refs/heads/master
| 2023-08-09T01:20:18.525176
| 2020-07-07T20:18:57
| 2020-07-07T20:18:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frst_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"raefetex@gmail.com"
] |
raefetex@gmail.com
|
f095f493b8c86691cddc688e4d19ccaf71870c88
|
c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34
|
/source/Clarification/Backtracking/52.N皇后2.py
|
0850985c798d542a6dbf9bbf340bfa76bed00408
|
[
"MIT"
] |
permissive
|
zhangwang0537/LeetCode-Notebook
|
73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1
|
1dbd18114ed688ddeaa3ee83181d373dcc1429e5
|
refs/heads/master
| 2022-11-13T21:08:20.343562
| 2020-04-09T03:11:51
| 2020-04-09T03:11:51
| 277,572,643
| 0
| 0
|
MIT
| 2020-07-06T14:59:57
| 2020-07-06T14:59:56
| null |
UTF-8
|
Python
| false
| false
| 1,700
|
py
|
# The n-queens puzzle asks how to place n queens on an n x n chessboard
# so that no two queens can attack each other.
#
# (The original problem statement showed a figure with one solution to the
# 8-queens puzzle.)
#
# Given an integer n, return the number of distinct solutions to the n-queens puzzle.
#
# Example:
#
# Input: 4
# Output: 2
# Explanation: There are two distinct solutions to the 4-queens puzzle, shown below.
# [
#  [".Q..", // solution 1
#   "...Q",
#   "Q...",
#   "..Q."],
#
#  ["..Q.", // solution 2
#   "Q...",
#   "...Q",
#   ".Q.."]
# ]
class Solution:
def totalNQueens(self, n: int) -> int:
def is_not_under_attack(row,col):
return not (rows[col] or hills[row - col] or dales[row + col])
        # Place a queen
        def place_queen(row,col):
            rows[col] = 1
            hills[row - col] = 1 # main diagonal
            dales[row + col] = 1 # anti-diagonal
        # Remove a queen
        def remove_queen(row,col):
            rows[col] = 0
            hills[row - col] = 0 # main diagonal
            dales[row + col] = 0 # anti-diagonal
        # Backtracking
def backtrack(row = 0,count = 0):
for col in range(n):
if is_not_under_attack(row, col):
place_queen(row, col)
                    if row + 1 == n: # if n queens have been placed, this is one complete solution
count += 1
else:
count = backtrack(row + 1,count)
remove_queen(row, col)
return count
rows = [0] * n
        hills = [0] * (2 * n - 1) # main diagonal
        dales = [0] * (2 * n - 1) # anti-diagonal
return backtrack()
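# Example (hypothetical driver): Solution().totalNQueens(4) returns 2,
# matching the two 4-queens placements listed in the header comment.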
|
[
"mzm@mail.dlut.edu.cn"
] |
mzm@mail.dlut.edu.cn
|
1bccbe2e72188711b68cde5b7e4c2a6c845c91c3
|
bbca6268e6b30e6945eee5d5d1852e0b164e294a
|
/tinyos-2.x/apps/LightControl/scripts/k_subset.py
|
02263c7bed02351c6713bca1820936e5bd83eb8c
|
[] |
no_license
|
nesl/hotline
|
6f95415a1b00a21fa4cfe1226590338b6380fe2f
|
46f71f7b505b3ede668ab74df39e6a1542d5f043
|
refs/heads/master
| 2021-01-01T17:33:44.725494
| 2011-09-21T20:06:33
| 2011-09-21T20:06:33
| 12,227,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
import sys
import random
# Adapted from
# http://code.activestate.com/recipes/156453-choose-a-random-k-subset-of-12-n/
def ranksb1(n,k):
if k > n: raise Exception, "N must be no less than K"
if k > n * 2 // 3:
pool = range(n)
for i in xrange(n-1, n-k-1, -1):
j = random.randrange(i+1)
pool[i], pool[j] = pool[j], pool[i]
return pool[-k:]
selections = {}
while k > len(selections):
value = random.randrange(1, n+1)
if value-1 in selections:
continue
selections[value-1] = True
return selections.keys()
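# Minimal usage sketch (Python 2, matching this module):
#   print ranksb1(10, 3)   # e.g. [1, 4, 7] -- a random 3-subset of range(10)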
|
[
"rahulb@3d136852-4100-4a3c-8742-798efa9d6f7c"
] |
rahulb@3d136852-4100-4a3c-8742-798efa9d6f7c
|
f4017d322fdadade24c60a26ae72bb14aad8170d
|
357db88164920a3d0a650d597b8183bfe5a60b1a
|
/probeplot.py
|
8e344e00143084c7b05aebd0986e344b041d30aa
|
[] |
no_license
|
Hasini74/Flush-Reload
|
d88b024503787b23b9f81fcbebc8626b5880c8f0
|
116c125c93d24a2f9a46c7574c7e8d8ee76802ad
|
refs/heads/main
| 2023-04-22T04:33:52.226341
| 2021-05-10T20:42:51
| 2021-05-10T20:42:51
| 366,164,990
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
from matplotlib import pyplot as plt
import seaborn as sns
with open('probes.txt') as f:
array = []
for val in f.read().split():
if int(val) < 400:
array.append(int(val))
plt.figure(figsize=(15, 8))
ax = sns.distplot(array)
ax.set_title("Histogram of probe timings (n= {})".format(len(array)))
ax.set_xlabel("CYCLES")
plt.savefig('probe_histogram.png')
|
[
"noreply@github.com"
] |
Hasini74.noreply@github.com
|
9d9ed5f5767b7fd951eb6ad1a2a01ca63fc8e5ed
|
56b63ee537f872af0fc028016d1508b4c1dd5c60
|
/school/migrations/0267_auto_20210317_1657.py
|
926120cf7259917f9c79aaa27206e75ae9e960a4
|
[] |
no_license
|
jacknjillsolutionsrevanth/EMS1
|
01fc571120f765b0fbfe3aa654b15ff578d6e9b9
|
db14d8e6c15669b5938aa9276c5e22006218814a
|
refs/heads/main
| 2023-08-03T19:40:50.073133
| 2021-10-01T07:02:37
| 2021-10-01T07:02:37
| 410,202,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
# Generated by Django 3.1.4 on 2021-03-17 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('school', '0266_auto_20210315_1612'),
]
operations = [
migrations.AddField(
model_name='daily_data',
name='routename',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='milkdata',
name='branch',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"jacknjillsolutions.revanth@gmail.com"
] |
jacknjillsolutions.revanth@gmail.com
|
3c060f1e62118348d6376d5a27dd8b1292ecdeee
|
37faa935d3af283be72423821a25d17172f2df6a
|
/lessons/csv/reading_csv.py
|
c7fae192cbcfc7131cca853fbf0501b66ae81dd6
|
[] |
no_license
|
rogeriolaguilar/python
|
2b214fccbb32ca269c745b26fbc082cb20981c23
|
fef86ca360e5a47fcd86f9ab99c04c58d353705b
|
refs/heads/master
| 2020-03-19T11:00:51.367723
| 2018-09-16T12:56:39
| 2018-09-16T12:56:39
| 136,421,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
import csv
import os
FILENAME = os.getcwd() + '/lessons/csv/hightemp.csv'
def read_csv(csv_name, keyname):
print(csv_name)
table = {}
with open(csv_name) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
table[row[keyname]] = row
return table
result = read_csv(FILENAME, 'City')
print("Baghdad in Jan =", result['Baghdad']['Jan'])
|
[
"rogerio.l.aguilar@gmail.com"
] |
rogerio.l.aguilar@gmail.com
|
e78baff9bdff094df6a9bde81c9da4513c0aa5b9
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/4/jyb.py
|
ca9d3c9ec4d59763f4fd3413bf1ece65baed5ad0
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'jYB':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
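
# Example input (tokens are whitespace-separated; the text to print must be
# delimited by standalone quote tokens):
#   jYB " hello world "   -> prints: hello world
#   jYB " "               -> prints an empty line
# Any line whose first token is not jYB prints ERROR and stops processing.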
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
230e160aee1fabd4bf85f0361b78a2acea3232e0
|
f8ff6a467536e37e4331aec6a53ccefccd13d31e
|
/ServerPython/user.py
|
e66127470f207c11f8cd4a9ecb00bab171587121
|
[
"MIT"
] |
permissive
|
zzragida/PythonExamples
|
0a7971b4b3564eab5ef7a7d045161ac35f627340
|
ed94ae2773a580a42e158ebdc7321a89ca4e991b
|
refs/heads/master
| 2021-01-18T15:06:56.763700
| 2016-02-16T02:41:59
| 2016-02-16T02:41:59
| 50,626,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,685
|
py
|
# -*- coding:utf-8 -*-
from twisted.python import failure
from collections import OrderedDict
from logger import logger
from hero import Hero
from costume import Costume
from item import Item
from properties import Properties
from cache import Cache
from protocol import gateway_pb2
import db
import random
import math
class User:
""" User class """
_selected_hero = None
_heroes = OrderedDict()
_costumes = OrderedDict()
_items = OrderedDict()
_events = OrderedDict()
_payments = OrderedDict()
_friends = OrderedDict()
_kakao_friends = OrderedDict()
_gifts = OrderedDict()
_achivements = OrderedDict()
@staticmethod
def get_user_id(game_id, hashed_kakao_id, kakao_id=-1):
user_id = db.make_pp_user(game_id, kakao_id, hashed_kakao_id)
if user_id > 0: return User(user_id)
return failure.Failure((gateway_pb2.EC_UNABLE_TO_OPERATE, 'User not found'))
#----------------------------------------------------------------#
def __init__(self, user_id):
self._user_id = user_id
self._load()
def __del__(self):
pass
def _load(self):
assert(self._user_id > 0)
self._load_user()
self._load_heroes()
self._load_costumes()
self._load_items()
for hero in self._heroes.values():
hero.load_stage()
hero.load_skills()
hero.update_attributes()
hero.update_unlock_stage_count()
self._load_rankings()
self._load_events()
self._load_coupons()
self._load_gifts()
self._load_payments()
self._load_friends()
self._load_achivements()
self._load_kakao_friends()
def _load_user(self):
assert(self._user_id > 0)
info = db.user(self._user_id)
if not info: return failure.Failure((gateway_pb2.EC_DATABASE, 'Database failed'))
self._info = info
def _load_heroes(self):
assert(self._user_id > 0)
self._heroes.clear()
for info in db.user_heroes(self._user_id):
job = info['job']
self._heroes[job] = Hero(info)
def _load_costumes(self):
assert(self._user_id > 0)
self._costumes.clear()
for info in db.user_costumes(self._user_id):
costume_id = info['costume_id']
costume_no = info['costume_no']
job = info['job']
level = info['level']
costume = Costume(costume_no, job, level)
self._costumes[costume_id] = costume
if self._heroes.has_key(job):
hero = self._heroes[job]
hero.add_costume(costume)
def _load_items(self):
assert(self._user_id > 0)
self._items.clear()
for info in db.user_items(self._user_id):
item_id = info['item_id']
hero_id = info['hero_id']
item = Item(info)
self._items[item_id] = Item(info)
if hero_id:
for hero in self._heroes.values():
if hero_id == hero.hero_id():
hero.attach_equip(item)
break
def _load_rankings(self):
assert(self._user_id > 0)
assert(self._info)
def _load_events(self):
assert(self._user_id > 0)
assert(self._info)
def _load_coupons(self):
assert(self._user_id > 0)
assert(self._info)
def _load_gifts(self):
self._gifts.clear()
self._gifts = db.user_gifts(self._user_id)
def _load_payments(self):
assert(self._user_id > 0)
assert(self._info)
def _load_friends(self):
assert(self._user_id > 0)
assert(self._info)
def _load_achivements(self):
assert(self._user_id > 0)
assert(self._info)
def _load_kakao_friends(self):
assert(self._user_id > 0)
assert(self._info)
#----------------------------------------------------------------#
    # Key-related helpers
def online_key(self, user_id=-1):
pass
def personal_key(self, user_id=-1):
pass
def ranker_key(self, user_id=-1):
pass
def story_key_(self, difficulty=-1):
pass
def story_detail_key_(self, difficulty=-1, user_id=-1):
pass
#----------------------------------------------------------------#
def user_id(self): return self._user_id
def kakao_id(self): return self._info['kakao_id']
def nickname(self): return self._info['nickname']
def honbul(self): return self._info['honbul']
def cash(self): return self._info['cash']
def talisman(self): return self._info['talisman']
def stone(self): return self._info['stone']
def coin(self): return self._info['coin']
def heart(self): return self._info['heart']
def searchable(self): return self._info['searchable']
def inventory_size(self): return self._info['inventory_size']
def hero_count(self): return len(self._heroes)
def no_kakao_message(self): return self._info['no_kakao_message']
def no_kakao_profile(self): return self._info['no_kakao_profile']
def review(self): return self._info['review']
#----------------------------------------------------------------#
def fill_info(self, info):
if not self._selected_hero:
return failure.Failure((gateway_pb2.EC_NO_HERO, "No hero"))
        if not self._info or not self._info.has_key('nickname'):
            return failure.Failure((gateway_pb2.EC_NO_NICKNAME, "Need nickname"))
info.honbul = self.honbul()
info.cash = self.cash()
info.talisman = self.talisman()
info.stone = self.stone()
info.coin = self.coin()
info.heart = self.heart()
info.inventory_size = self.inventory_size()
info.searchable = self.searchable()
info.no_kakao_message = self.no_kakao_message()
info.no_kakao_profile = self.no_kakao_profile()
info.review = self.review()
info.nickname = self.nickname()
info.ranking = 0
info.terminate_abnormally = False
if self._selected_hero:
self._selected_hero.fill_hero(info.selected)
self._selected_hero.fill_dungeons(info.dungeons)
self.fill_badges(info.badges)
def fill_properties(self, properties):
properties.honbul_for_expand_skill_button = Properties.HONBUL_FOR_EXPAND_SKILL_BUTTON
properties.cash_for_expand_skill_button = Properties.CASH_FOR_EXPAND_SKILL_BUTTON
properties.reset_cash_for_material_cooltime = Properties.RESET_CASH_FOR_MATERIAL_COOLTIME
properties.collect_material_multiplier = Properties.COLLECT_MATERIAL_MULTIPLIER
properties.max_reset_material_cooltime = Properties.MAX_RESET_MATERIAL_COOLTIME
properties.cash_for_resurrection = Properties.CASH_FOR_RESURRECTION
properties.coin_for_resurrection = Properties.COIN_FOR_RESURRECTION
properties.needs_resurrection_by_cash = self.needs_resurrection_by_cash()
properties.needs_resurrection_by_coin = self.needs_resurrection_by_coin()
properties.hero_level_for_multiplay = Properties.HERO_LEVEL_FOR_MULTIPLAY
properties.level_for_new_archer = Properties.LEVEL_FOR_NEW_ARCHER
properties.honbul_for_new_hero = Properties.HONBUL_FOR_NEW_HERO
properties.cash_for_inventory_slot = Properties.CASH_FOR_INVENTORY_SLOT
properties.max_hero_level = Properties.MAX_HERO_LEVEL
properties.send_heart_amount = Properties.SEND_HEART_AMOUNT
properties.reward_of_send_heart = Properties.REWARD_OF_SEND_HEART
properties.max_friend_count = Properties.MAX_FRIEND_COUNT
properties.reward_of_kakao_invitation = Properties.REWARD_OF_KAKAO_INVITATION
properties.battle_skip_star1 = Properties.BATTLE_SKIP_STAR1
properties.battle_skip_star2 = Properties.BATTLE_SKIP_STAR2
properties.battle_skip_star3 = Properties.BATTLE_SKIP_STAR3
properties.battle_skip_star4 = Properties.BATTLE_SKIP_STAR4
properties.battle_skip_star5 = Properties.BATTLE_SKIP_STAR5
properties.honbul_for_battle_skip = Properties.HONBUL_FOR_BATTLE_SKIP
properties.battle_skip_exp = Properties.BATTLE_SKIP_EXP
properties.battle_skip_probability = Properties.BATTLE_SKIP_PROBABILITY
properties.discount_for_oni_shop_honbul = Properties.DISCOUNT_FOR_ONI_SHOP_HONBUL
properties.discount_for_oni_shop_cash = Properties.DISCOUNT_FOR_ONI_SHOP_CASH
properties.discount_for_reset_skill = Properties.DISCOUNT_FOR_RESET_SKILL
properties.closing_dungeon_timeout = Properties.CLOSING_DUNGEON_TIMEOUT
properties.select_stage_timeout = Properties.SELECT_STAGE_TIMEOUT
def fill_badges(self, badges):
pass
#----------------------------------------------------------------#
    # Connection / login
def login(self, user_id):
pass
def logout(self):
pass
def online(self):
pass
def offline(self):
pass
#----------------------------------------------------------------#
    # Game-related
def start_game(self):
pass
def end_game(self):
pass
def leave_in_game(self, play_time, exp, honbul):
pass
def finish_game(self, honbul):
pass
def finish_single_game(self, stage, finish):
pass
def finish_survival_game(self, wave, score, honbul, play_time):
pass
def needs_resurrection_by_cash(self):
return 0
def needs_resurrection_by_coin(self):
return 0
#----------------------------------------------------------------#
    # Nickname / heroes
def has_nickname(self):
assert(self._info)
if not self._info.has_key('nickname'): return False
if self._info['nickname'] is None: return False
return True
def change_nickname(self, new_nickname):
if not db.change_nickname(self._user_id, new_nickname):
return failure.Failure((gateway_pb2.EC_DATABASE, "Database failed"))
self._info['nickname'] = new_nickname
def has_hero(self, job):
return self._heroes.has_key(job)
def make_hero(self, job):
hero_info = db.make_hero(self.user_id(), job)
if not hero_info:
return failure.Failure((gateway_pb2.EC_DATABASE, "Database failed"))
hero = Hero(hero_info)
self._heroes[job] = hero
self._selected_hero = hero
return self._selected_hero
def select_hero(self, job):
hero = self._heroes[job]
self._selected_hero = hero
return self._selected_hero
def hero_by_job(self, job):
pass
def hero_by_id(self, hero_id):
pass
def selected_hero(self):
return self._selected_hero
def max_hero_level(self):
assert(self._heroes)
max_level = 0
for hero in self._heroes.values():
if hero.level() > max_level:
max_level = hero.level()
return max_level
def fill_heroes(self, response):
assert(self._heroes)
for h in self._heroes.values():
hero = response.heroes.add()
hero.job = h.job()
hero.level = h.level()
#----------------------------------------------------------------#
    # Stage-related
def fill_dungeons(self, dungeons):
if not self._selected_hero:
return failure.Failure((gateway_pb2.EC_NO_HERO, "No Hero"))
return self._selected_hero.fill_dungeons(dungeons)
def fill_epic_dungeons(self, epic_dungeons):
if not self._selected_hero:
return failure.Failure((gateway_pb2.EC_NO_HERO, "No Hero"))
return self._selected_hero.fill_epic_dungeons(epic_dungeons)
#----------------------------------------------------------------#
    # Costume-related
def has_costume(self, costume_id):
pass
def add_costume(self, type):
pass
def buy_costume(self, costume_id, cost):
pass
def make_costume(self, costume_id, cost):
pass
def reinforce_costume(self, costume_id, honbul, cash):
pass
def fetch_costume(self, hero):
pass
#----------------------------------------------------------------#
    # Inventory / items
def fill_inventory(self, inventory):
inventory.limit = self.inventory_size()
inventory.honbul = self.honbul()
inventory.cash = self.cash()
        for item in self._items.values():
if item.hero_id() > 0: continue
entry = inventory.entries.add()
entry.item_id = item.item_id()
entry.type = item.type()
def add_inventory_slot(self, count):
pass
def empty_slot_in_inventory(self):
pass
def has_excessing_inventory(self):
pass
def put_on(self, item):
pass
def take_off(self, item):
pass
def reinforce_item(self, item, honbul, cash, stone, success=True, crash=False):
pass
def fix_item(self, item, cost):
pass
def get_item_by_id(self, item_id):
pass
def get_item_by_no(self, item_no):
pass
def get_item_count_by_no(self, item_no):
pass
def has_item(self, item_no, count):
pass
def drop_item(self, item, count):
pass
def remove_item(self, item):
pass
def make_item(self, item_no, blueprint):
pass
def add_item(self, item_no):
pass
#----------------------------------------------------------------#
    # Gift box
def add_gift(self, gift_id):
pass
def delete_gift(self, gift):
pass
#----------------------------------------------------------------#
    # Quick material collection
def collect_material(self, material_id, collect_material):
if self.material_cooltime() > 0:
return failure.Failure((gateway_pb2.EC_UNABLE_TO_OPERATE, 'Exist cooltime'))
material_price = Cache.material_price(material_id)
if not material_price:
return failure.Failure((gateway_pb2.EC_UNABLE_TO_OPERATE, 'Material is not exist'))
if self.honbul() < material_price['price']:
return failure.Failure((gateway_pb2.EC_NOT_ENOUGH_HONBUL, 'Need more honbul'))
        # Generate the material amount: the formula comes from the price table
        # as a string and is evaluated with the helpers aliased below in scope.
        RND = random.randint
        R = round
        C = math.ceil
        F = math.floor
        amount = eval(material_price['amount'])
        amount *= Properties.COLLECT_MATERIAL_MULTIPLIER
        # Store the response
collect_material.material_id = material_id
collect_material.amount = amount
collect_material.honbul = self.honbul()
    def material_cooltime(self, material_cooltime):
        # NOTE: this fill-style method takes a message argument, yet the class
        # also calls a zero-argument material_cooltime() (see collect_material),
        # so one of the two names is shadowed.
        material_cooltime.cooltime = self.material_cooltime()
max_count = Properties.COLLECT_MATERIAL_PER_DAY
material_cooltime.current_count = max_count - self.material_count()
material_cooltime.max_count = max_count
material_cooltime.reset_count = self.material_reset_count()
def reset_material_cooltime(self):
pass
#----------------------------------------------------------------#
    # Cash shop
def fill_eshop(self, market, eshop):
pass
def buy_in_eshop(self, pay_id, payment):
pass
def has_eshop_event(self):
pass
#----------------------------------------------------------------#
    # Top-up station
def fill_cash_shop(self, market, cash_shop):
pass
def buy_in_cash_shop(self, goods, cash_shop):
pass
#----------------------------------------------------------------#
    # Oni (goblin) shop
def fill_reset_skill(self, reset_skill):
price = Properties.HONBUL_FOR_RESET_SKILL
discount = Properties.DISCOUNT_FOR_RESET_SKILL
if discount > 0:
price -= int(price * (discount/100.0))
reset_skill.skill_point = self._selected_hero.reset_skill_point()
reset_skill.price = price
def fill_oni_shop(self, method, category, oni_shop):
pass
def buy_in_oni_shop(self, price, discount, goods, oni_shop):
pass
#----------------------------------------------------------------#
    # Survival dungeon
def check_survival_try(self):
pass
def spend_survival_try(self):
pass
def reset_survival_try(self):
pass
def update_survival_wave_record(self, wave):
pass
def fill_survival_buff(self, survival_buff):
assert(self._info)
survival_buff.survival_try_count = 0
survival_buff.survival_try_per_day = 0
survival_buff.reset_cooltime = 0
survival_buff.wave = 0
survival_buff.ranking = 0
for key, val in Cache.survival_buffs().iteritems():
buff = survival_buff.buffs.add()
buff.buff_id = key
buff.price = val['price']
buff.name = val['name']
buff.lock = True
#----------------------------------------------------------------#
    # User history
#----------------------------------------------------------------#
# for test
def set_cash(self, cash):
db.execute("UPDATE ARPG_GT_USER SET CASH = %d WHERE USER_ID = %d" % (cash, self._user_id))
self._info['cash'] = cash
def set_honbul(self, honbul):
db.execute("UPDATE ARPG_GT_USER SET HONBUL = %d WHERE USER_ID = %d" % (honbul, self._user_id))
self._info['honbul'] = honbul
def set_talisman(self, talisman):
db.execute("UPDATE ARPG_GT_USER SET TALISMAN = %d WHERE USER_ID = %d" % (talisman, self._user_id))
self._info['talisman'] = talisman
def set_stone(self, stone):
db.execute("UPDATE ARPG_GT_USER SET STONE = %d WHERE USER_ID = %d" % (stone, self._user_id))
self._info['stone'] = stone
def set_coin(self, coin):
db.execute("UPDATE ARPG_GT_USER SET COIN = %d WHERE USER_ID = %d" % (coin, self._user_id))
self._info['coin'] = coin
def set_heart(self, heart):
db.execute("UPDATE ARPG_GT_USER SET HEART = %d WHERE USER_ID = %d" % (heart, self._user_id))
self._info['heart'] = heart
#----------------------------------------------------------------#
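
# Usage sketch (ids are illustrative; get_user_id returns a failure.Failure
# on lookup errors and a User instance otherwise):
# user = User.get_user_id(game_id=1, hashed_kakao_id='abc123')
# if not isinstance(user, failure.Failure):
#     user.select_hero(job=1)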
|
[
"zzragida@gmail.com"
] |
zzragida@gmail.com
|
73dbbcaf97f0a2613100f2925da30caaf3381b7c
|
61bf34554ab8ca7bd11550e45533caf0261152e7
|
/generic.py
|
6ee5c450ad801ca64c88c462e6c5c0d90504205e
|
[] |
no_license
|
drouetd/Scrape-tools
|
c85cca3d7224cc42630b43e3e8a7243fcf64b364
|
4105ebcb51df06553b6600f7f988bcb7e89d58a6
|
refs/heads/master
| 2021-01-10T12:21:27.315200
| 2016-04-09T16:04:16
| 2016-04-09T16:04:16
| 55,608,885
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
#!/usr/bin/env python
import sys
import csv
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import parsers
def get_html(url=None):
""" Returns the raw html for a given url """
driver = webdriver.PhantomJS()
driver.get(url)
# retrieve the desired page
try:
WebDriverWait(driver, 2)
html = driver.page_source
except:
print "%s occurred while trying to read page." % (sys.exc_info()[0].__name__)
return
finally:
driver.quit()
return html
def write_to_csv(filename, fields, records):
""" Writes a list of dictionaries to a csv file. """
with open(filename, 'wb') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fields, delimiter=',',quotechar='"',quoting=csv.QUOTE_ALL)
# writer.writeheader()
for rec in records:
try:
writer.writerow(rec)
except:
print "%s occurred with %s" % (sys.exc_info()[0].__name__, rec[fields[0]])
print rec
                print '\n'
return
if __name__ == "__main__":
# page specific setup
page_url = "http://www.aeromontreal.ca/member-list.html?count=184#member5"
parser = parsers.parse_aero
output_filename = 'Data/test.csv'
# hack for dealing with accented text
reload(sys)
sys.setdefaultencoding('utf-8')
# read page and extract data
html = get_html(page_url)
if html:
#bsoup = BeautifulSoup(html, 'html5lib')
record_list = parser(html)
# write records to csv
fields = ['name', 'phone', 'email', 'website']
write_to_csv(output_filename, fields, record_list)
|
[
"drouetd@gmail.com"
] |
drouetd@gmail.com
|
c720729c2e53c7a2d567a492348db29f3707f53d
|
837d61048ea502e1ab057525ebd4d108ccf8e3d1
|
/Project/starter.py
|
ae7a7d9bdf121ad827c2194c28a36ba30f5909ff
|
[] |
no_license
|
JakubKucecka/VyhladavanieInformacii
|
7e7d9e5856dbfb4104adc97b5ab2e8cd6b8ee478
|
a2eaaee47901d70cb1ed34b6f456917dd808ced1
|
refs/heads/main
| 2023-01-29T07:09:00.891186
| 2020-12-08T15:54:16
| 2020-12-08T15:54:16
| 300,634,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
#!/usr/bin/env python3
import lib.pars as pars
import lib.dump as dump
import lib.pair as pair
import lib.sort as sort
import lib.index as index
import lib.search as search
"""
global variables that determine the paths to files
"""
actors_file = "etc/parse/actors.gz"
performances_file = "etc/parse/performances.gz"
other_file = "etc/parse/other.gz"
final_file = "etc/parse/final.gz"
index_dir = "etc/index"
"""
initialization dictionary
"""
ACTOR = {}
PERF_FILM = {}
FILM_ID_NAME = {}
"""
run the dump function
"""
cmd = input("\nDo you want to run a dump? [y|n]: ")
if cmd == "y":
dump.dump(actors_file, performances_file, other_file, 0)
elif cmd != "n":
print("ERROR: Unknown input")
exit(1)
"""
run the pars, pair, and sort / write / index functions
"""
cmd = input("\nDo you want to run a pars, pair and sort? [y|n]: ")
if cmd == "y":
[ACTOR, PERF_FILM, FILM_ID_NAME] = pars.pars(actors_file, performances_file, other_file, ACTOR, PERF_FILM,
FILM_ID_NAME)
ACTOR = pair.pair(ACTOR, PERF_FILM, FILM_ID_NAME)
del FILM_ID_NAME
del PERF_FILM
sort.sort(ACTOR, final_file)
del ACTOR
index.index(index_dir, final_file)
elif cmd != "n":
print("ERROR: Unknown input")
exit(1)
"""
launch the search function
"""
cmd = input("\nDo you want to run a search? [y|n]: ")
if cmd == "y":
ret = search.search(index_dir)
if ret != "n":
print("ERROR: Unknown input")
exit(1)
elif cmd != "n":
print("ERROR: Unknown input")
exit(1)
exit(0)
|
[
"jakubkucecka@gmail.com"
] |
jakubkucecka@gmail.com
|
e8bb422108875b71cf15770aa2e5b5baac11064d
|
53d59691b31efc73b0baa76e0581d66f290c4ef7
|
/ip_dec_bin_convert.py
|
47bf11770c55e1a07a88c79e0e5ab7c173128481
|
[
"Apache-2.0"
] |
permissive
|
shamurti/Python_Class
|
6dec1495379cdc67eff534396e5e1acc820b881c
|
e4da52ad0b5061e0fa127a03157be43a08ebe28b
|
refs/heads/master
| 2021-01-10T04:19:56.507050
| 2018-11-19T20:20:10
| 2018-11-19T20:20:10
| 49,674,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
#!/usr/bin/env python
ip = raw_input("\nEnter an IP address in dotted decimal format: ")
octets = ip.split(".")
print "\nThe octets of the IP address: %s in binary are: \n" % (ip)
print "%20s %20s %20s %20s" % ('first_octet','second_octet','third_octet','fourth_octet')
print "%20s %20s %20s %20s" % (bin(int(octets[0])),bin(int(octets[1])),bin(int(octets[2])),bin(int(octets[3])))
|
[
"Oghab@Shahan.local"
] |
Oghab@Shahan.local
|
4bb7d73eb57ca31465156c27a8acb11216bf381b
|
ce04cdcf2f751fc665ccf3d2a011c6c5c1a4aca2
|
/Exploitation/Reverse-Engineering/RATs-Backdooring/ShellBot/client/selfUpdate.py
|
12222067bdfeb97b6acbf48bda9b19f3ca9f9ec9
|
[
"MIT"
] |
permissive
|
R3dFruitRollUp/TID3xploits
|
fa2cef2f42553579a3f2d447b1d8ae7776b12dd8
|
b57d8bae454081a3883a5684679e2a329e72d6e5
|
refs/heads/master
| 2021-05-12T01:11:17.315961
| 2018-01-04T05:03:20
| 2018-01-04T05:03:20
| 117,551,951
| 2
| 1
| null | 2018-01-15T13:55:19
| 2018-01-15T13:55:18
| null |
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
#!/usr/bin/env python3
import os
import sys
import urllib.request
import tempfile
import shutil
import json
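
# Note: this file reads as a template rather than a standalone script: the
# bare {pid}, {frozen}, {exe}, {arg}, {host} and {port} placeholders (and the
# doubled {{}} braces in repoUrl below) appear meant to be substituted by an
# outer str.format() call before the script is written out and executed.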
def getURL(owner, repo, name):
repoUrl = 'https://api.github.com/repos/{{}}/{{}}/releases/latest'\
.format(owner, repo)
response = urllib.request.urlopen(repoUrl)
json_val = json.loads(response.read().decode())
for file in json_val['assets']:
if name == file['name']:
return file['browser_download_url']
def download(fileUrl, file):
with urllib.request.urlopen(fileUrl) as response, open(file, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
if sys.platform == "win32": os.system("taskkill /F /T /PID {pid}")
else: os.system("kill {pid}")
if {frozen}:
url = getURL('sayak-brm', 'ShellBot', 'client.exe')
download(url, r"{exe}")
else:
url = getURL('sayak-brm', 'ShellBot', 'client.py')
download(url, r"{arg}")
if sys.platform == "win32":
if {frozen}:
os.system(r"{exe} {host} {port}")
else:
runner = 'CreateObject("WScript.Shell").Run WScript.Arguments(0), 0'
with open(tempfile.gettempdir() + r"/runner.vbs", "w") as f:
f.write(runner)
os.system(tempfile.gettempdir() + r'/runner.vbs "{exe} {arg} {host} {port}"')
else: os.system(r"nohup {exe} {arg} {host} {port} > /dev/null 2>&1 &")
|
[
"noreply@github.com"
] |
R3dFruitRollUp.noreply@github.com
|
1f6e97b6fae3bcc121943a41542b27b69deeafab
|
8c77dcc0fd3e497194e572c8641200f08b32dc97
|
/general/function_factory.py
|
f2c923b5d537856aae039428a6462973dfd14e56
|
[
"MIT"
] |
permissive
|
bpuderer/python-snippets
|
633a1e382f7c9812621d61ec16a15e106d1d5fc8
|
3277b76b03f3ceb11e4571be4cecae68051aac45
|
refs/heads/master
| 2021-12-27T09:19:27.212312
| 2021-12-22T13:08:56
| 2021-12-22T13:08:56
| 46,539,064
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
def raise_to(exp):
def raise_to_exp(x):
return pow(x, exp)
return raise_to_exp
cube = raise_to(3)
print(cube(4))
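
# The returned function closes over exp, so each call to the factory yields
# an independent power function:
square = raise_to(2)
print(square(5))  # 25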
|
[
"bpuderer@yahoo.com"
] |
bpuderer@yahoo.com
|
ec2e424faed3e15c10e4986aea599eb9be839086
|
e3aa7ed3985d0f6fd6aefccc9605b8c392b3f912
|
/.ipynb_checkpoints/datasets-checkpoint.py
|
d7c0683f957b9abe59f9ec8d322c87a5f211f170
|
[] |
no_license
|
jypark1994/VLRRProject
|
628455ff9210ffd5b278b7d6886fcfc3c01452fb
|
38526268f16d01fd84a9542402037d41b8d2c245
|
refs/heads/master
| 2022-12-16T23:45:30.979390
| 2020-08-14T02:46:57
| 2020-08-14T02:46:57
| 286,142,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,092
|
py
|
from torch import utils
from torchvision import datasets, transforms
# args : batch_size, num_workers, down_scale
def ILSVRC_Birds(args):
transforms_train = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.Resize(224//args.down_scale),
transforms.ToTensor(),
])
transforms_test = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.Resize(224//args.down_scale),
transforms.ToTensor(),
])
train_dataset = datasets.ImageFolder("../dataset/ILSVRC_Birds/train", transform=transforms_train)
train_loader = utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
test_dataset = datasets.ImageFolder("../dataset/ILSVRC_Birds/val", transform=transforms_test)
test_loader = utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
return train_loader, test_loader
def MosquitoDL(args):
init_scale = 1.15
transforms_train = transforms.Compose([
transforms.ColorJitter(brightness=0.1,contrast=0.2,saturation=0.2,hue=0.1),
transforms.RandomAffine(360,scale=[init_scale-0.15,init_scale+0.15]),
transforms.CenterCrop(224),
transforms.Resize(224//args.down_scale),
transforms.ToTensor()
])
transforms_test = transforms.Compose([
transforms.Resize(224//args.down_scale),
transforms.ToTensor()
])
train_dataset = datasets.ImageFolder("/media/data/MosquitoDL/TrainVal", transform=transforms_train)
train_loader = utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
test_dataset = datasets.ImageFolder("/media/data/MosquitoDL/Test", transform=transforms_test)
test_loader = utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
return train_loader, test_loader
def MosquitoMultiscale(args):
init_scale = 1.15
transforms_train = transforms.Compose([
transforms.ColorJitter(brightness=0.1,contrast=0.2,saturation=0.2,hue=0.1),
transforms.RandomAffine(360,scale=[init_scale-0.15,init_scale+0.15]),
transforms.CenterCrop(224),
transforms.Resize(224//args.down_scale),
transforms.ToTensor()
])
transforms_test = transforms.Compose([
transforms.Resize(224//args.down_scale),
transforms.ToTensor()
])
train_dataset = datasets.ImageFolder("/media/data/MosquitoDL/TrainVal", transform=transforms_train)
train_loader = utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
test_dataset = datasets.ImageFolder("/media/data/MosquitoDL/Test", transform=transforms_test)
test_loader = utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
return train_loader, test_loader
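
# Usage sketch (assumes an argparse-style namespace carrying the fields the
# loaders read: batch_size, num_workers, down_scale; dataset paths above are
# hard-coded, so this only runs where that data exists):
# from types import SimpleNamespace
# args = SimpleNamespace(batch_size=32, num_workers=4, down_scale=1)
# train_loader, test_loader = MosquitoDL(args)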
|
[
"jy_park@inu.ac.kr"
] |
jy_park@inu.ac.kr
|
4df9ea69048a2ad0b8f86d6c9a2ba6a5b4c33d67
|
e06f94c1cf7748352516e15a983df38e0693319d
|
/venv/lib/python3.9/site-packages/web3/pm.py
|
5257db22bd20bce2db4460aaffcd9d35406a644c
|
[
"MIT"
] |
permissive
|
Ruben1701/Blockchain-for-Permission-Management
|
3fcf2c2fad62a1219715cb106ef11aed857d8a71
|
f063f3da2dc9c12d4c68332e309e402a67fd7a8b
|
refs/heads/main
| 2023-06-05T14:41:05.237873
| 2021-06-26T21:13:38
| 2021-06-26T21:13:38
| 347,065,325
| 0
| 1
|
MIT
| 2021-05-26T11:49:34
| 2021-03-12T12:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 21,146
|
py
|
from abc import (
ABC,
abstractmethod,
)
import json
from pathlib import (
Path,
)
from typing import (
Any,
Dict,
Iterable,
NamedTuple,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from eth_typing import (
URI,
Address,
ChecksumAddress,
ContractName,
Manifest,
)
from eth_utils import (
is_canonical_address,
is_checksum_address,
to_checksum_address,
to_text,
to_tuple,
)
from ethpm import (
ASSETS_DIR,
Package,
)
from ethpm.uri import (
is_supported_content_addressed_uri,
resolve_uri_contents,
)
from ethpm.validation.manifest import (
validate_manifest_against_schema,
validate_raw_manifest_format,
)
from ethpm.validation.package import (
validate_package_name,
validate_package_version,
)
from web3 import Web3
from web3._utils.ens import (
is_ens_name,
)
from web3.exceptions import (
InvalidAddress,
ManifestValidationError,
NameNotFound,
PMError,
)
from web3.module import (
Module,
)
from web3.types import (
ENS,
)
# Package Management is still in alpha, and its API is likely to change, so it
# is not automatically available on a web3 instance. To use the `PM` module,
# please enable the package management API on an individual web3 instance.
#
# >>> from web3.auto import w3
# >>> w3.pm
# AttributeError: The Package Management feature is disabled by default ...
# >>> w3.enable_unstable_package_management_api()
# >>> w3.pm
# <web3.pm.PM at 0x....>
T = TypeVar("T")
class ReleaseData(NamedTuple):
package_name: str
version: str
manifest_uri: URI
class ERC1319Registry(ABC):
"""
The ERC1319Registry class is a base class for all registry implementations to inherit from. It
defines the methods specified in `ERC 1319 <https://github.com/ethereum/EIPs/issues/1319>`__.
All of these methods are prefixed with an underscore, since they are not intended to be
accessed directly, but rather through the methods on ``web3.pm``. They are unlikely to change,
but must be implemented in a `ERC1319Registry` subclass in order to be compatible with the
    `PM` module. Any custom methods (e.g. not defined in ERC1319) in a subclass
should *not* be prefixed with an underscore.
All of these methods must be implemented in any subclass in order to work with `web3.pm.PM`.
Any implementation specific logic should be handled in a subclass.
"""
@abstractmethod
def __init__(self, address: Address, w3: Web3) -> None:
"""
Initializes the class with the on-chain address of the registry, and a web3 instance
connected to the chain where the registry can be found.
Must set the following properties...
* ``self.registry``: A `web3.contract` instance of the target registry.
* ``self.address``: The address of the target registry.
* ``self.w3``: The *web3* instance connected to the chain where the registry can be found.
"""
pass
#
# Write API
#
@abstractmethod
def _release(self, package_name: str, version: str, manifest_uri: str) -> bytes:
"""
Returns the releaseId created by successfully adding a release to the registry.
* Parameters:
            * ``package_name``: Valid package name according to the spec.
* ``version``: Version identifier string, can conform to any versioning scheme.
* ``manifest_uri``: URI location of a manifest which details the release contents
"""
pass
#
# Read API
#
@abstractmethod
def _get_package_name(self, package_id: bytes) -> str:
"""
Returns the package name associated with the given package id, if the
package id exists on the connected registry.
* Parameters:
* ``package_id``: 32 byte package identifier.
"""
pass
@abstractmethod
def _get_all_package_ids(self) -> Iterable[bytes]:
"""
Returns a tuple containing all of the package ids found on the connected registry.
"""
pass
@abstractmethod
def _get_release_id(self, package_name: str, version: str) -> bytes:
"""
Returns the 32 bytes release id associated with the given package name and version,
if the release exists on the connected registry.
* Parameters:
            * ``package_name``: Valid package name according to the spec.
* ``version``: Version identifier string, can conform to any versioning scheme.
"""
pass
@abstractmethod
def _get_all_release_ids(self, package_name: str) -> Iterable[bytes]:
"""
        Returns a tuple containing all of the release ids belonging to the given package name,
if the package has releases on the connected registry.
* Parameters:
            * ``package_name``: Valid package name according to the spec.
"""
pass
@abstractmethod
def _get_release_data(self, release_id: bytes) -> ReleaseData:
"""
Returns a tuple containing (package_name, version, manifest_uri) for the given release id,
if the release exists on the connected registry.
* Parameters:
* ``release_id``: 32 byte release identifier.
"""
pass
@abstractmethod
def _generate_release_id(self, package_name: str, version: str) -> bytes:
"""
Returns the 32 byte release identifier that *would* be associated with the given
package name and version according to the registry's hashing mechanism.
The release *does not* have to exist on the connected registry.
* Parameters:
            * ``package_name``: Valid package name according to the spec.
* ``version``: Version identifier string, can conform to any versioning scheme.
"""
pass
@abstractmethod
def _num_package_ids(self) -> int:
"""
Returns the number of packages that exist on the connected registry.
"""
pass
@abstractmethod
def _num_release_ids(self, package_name: str) -> int:
"""
Returns the number of releases found on the connected registry,
that belong to the given package name.
* Parameters:
            * ``package_name``: Valid package name according to the spec.
"""
pass
@classmethod
@abstractmethod
def deploy_new_instance(cls: Type[T], w3: Web3) -> T:
"""
Class method that returns a newly deployed instance of ERC1319Registry.
* Parameters:
* ``w3``: Web3 instance on which to deploy the new registry.
"""
pass
BATCH_SIZE = 100
class SimpleRegistry(ERC1319Registry):
"""
This class represents an instance of the `Solidity Reference Registry implementation
<https://github.com/ethpm/solidity-registry>`__.
"""
def __init__(self, address: ChecksumAddress, w3: Web3) -> None:
abi = get_simple_registry_manifest()["contractTypes"]["PackageRegistry"][
"abi"
]
self.registry = w3.eth.contract(address=address, abi=abi)
self.address = address
self.w3 = w3
def _release(self, package_name: str, version: str, manifest_uri: str) -> bytes:
tx_hash = self.registry.functions.release(
package_name, version, manifest_uri
).transact()
self.w3.eth.wait_for_transaction_receipt(tx_hash)
return self._get_release_id(package_name, version)
def _get_package_name(self, package_id: bytes) -> str:
package_name = self.registry.functions.getPackageName(package_id).call()
return package_name
@to_tuple
def _get_all_package_ids(self) -> Iterable[bytes]:
num_packages = self._num_package_ids()
pointer = 0
while pointer < num_packages:
new_ids, new_pointer = self.registry.functions.getAllPackageIds(
pointer,
(pointer + BATCH_SIZE)
).call()
if not new_pointer > pointer:
break
yield from reversed(new_ids)
pointer = new_pointer
def _get_release_id(self, package_name: str, version: str) -> bytes:
return self.registry.functions.getReleaseId(package_name, version).call()
@to_tuple
def _get_all_release_ids(self, package_name: str) -> Iterable[bytes]:
num_releases = self._num_release_ids(package_name)
pointer = 0
while pointer < num_releases:
new_ids, new_pointer = self.registry.functions.getAllReleaseIds(
package_name,
pointer,
(pointer + BATCH_SIZE)
).call()
if not new_pointer > pointer:
break
yield from reversed(new_ids)
pointer = new_pointer
def _get_release_data(self, release_id: bytes) -> ReleaseData:
name, version, uri = self.registry.functions.getReleaseData(release_id).call()
return ReleaseData(name, version, uri)
def _generate_release_id(self, package_name: str, version: str) -> bytes:
return self.registry.functions.generateReleaseId(package_name, version).call()
def _num_package_ids(self) -> int:
return self.registry.functions.numPackageIds().call()
def _num_release_ids(self, package_name: str) -> int:
return self.registry.functions.numReleaseIds(package_name).call()
@classmethod
def deploy_new_instance(cls, w3: Web3) -> 'SimpleRegistry':
manifest = get_simple_registry_manifest()
registry_package = Package(manifest, w3)
registry_factory = registry_package.get_contract_factory(ContractName("PackageRegistry"))
tx_hash = registry_factory.constructor().transact()
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
return cls(tx_receipt["contractAddress"], w3)
class PM(Module):
"""
The PM module will work with any subclass of ``ERC1319Registry``, tailored to a particular
implementation of `ERC1319 <https://github.com/ethereum/EIPs/issues/1319>`__, set as
its ``registry`` attribute.
"""
def get_package_from_manifest(self, manifest: Manifest) -> Package:
"""
Returns a `Package <https://github.com/ethpm/py-ethpm/blob/master/ethpm/package.py>`__
instance built with the given manifest.
* Parameters:
* ``manifest``: A dict representing a valid manifest
"""
return Package(manifest, self.web3)
def get_package_from_uri(self, manifest_uri: URI) -> Package:
"""
Returns a `Package <https://github.com/ethpm/py-ethpm/blob/master/ethpm/package.py>`__
instance built with the Manifest stored at the URI.
If you want to use a specific IPFS backend, set ``ETHPM_IPFS_BACKEND_CLASS``
to your desired backend. Defaults to Infura IPFS backend.
* Parameters:
* ``uri``: Must be a valid content-addressed URI
"""
return Package.from_uri(manifest_uri, self.web3)
def get_local_package(self, package_name: str, ethpm_dir: Path = None) -> Package:
"""
Returns a `Package <https://github.com/ethpm/py-ethpm/blob/master/ethpm/package.py>`__
instance built with the Manifest found at the package name in your local ethpm_dir.
* Parameters:
* ``package_name``: Must be the name of a package installed locally.
* ``ethpm_dir``: Path pointing to the target ethpm directory (optional).
"""
if not ethpm_dir:
ethpm_dir = Path.cwd() / '_ethpm_packages'
if not ethpm_dir.name == "_ethpm_packages" or not ethpm_dir.is_dir():
raise PMError(f"{ethpm_dir} is not a valid ethPM packages directory.")
local_packages = [pkg.name for pkg in ethpm_dir.iterdir() if pkg.is_dir()]
if package_name not in local_packages:
raise PMError(
f"Package: {package_name} not found in {ethpm_dir}. "
f"Available packages include: {local_packages}."
)
target_manifest = json.loads(
(ethpm_dir / package_name / "manifest.json").read_text()
)
return self.get_package_from_manifest(target_manifest)
def set_registry(self, address: Union[Address, ChecksumAddress, ENS]) -> None:
"""
Sets the current registry used in ``web3.pm`` functions that read/write to an on-chain
registry. This method accepts checksummed/canonical addresses or ENS names. Addresses
must point to an on-chain instance of an ERC1319 registry implementation.
        To use an ENS domain as the address, make sure a valid ENS instance is set as ``web3.ens``.
* Parameters:
* ``address``: Address of on-chain Registry.
"""
if is_canonical_address(address):
addr_string = to_text(address)
self.registry = SimpleRegistry(to_checksum_address(addr_string), self.web3)
elif is_checksum_address(address):
self.registry = SimpleRegistry(cast(ChecksumAddress, address), self.web3)
elif is_ens_name(address):
self._validate_set_ens()
addr_lookup = self.web3.ens.address(str(address))
if not addr_lookup:
raise NameNotFound(
"No address found after ENS lookup for name: {0}.".format(address)
)
self.registry = SimpleRegistry(addr_lookup, self.web3)
else:
raise PMError(
"Expected a canonical/checksummed address or ENS name for the address, "
"instead received {0}.".format(type(address))
)
def deploy_and_set_registry(self) -> ChecksumAddress:
"""
Returns the address of a freshly deployed instance of `SimpleRegistry`
and sets the newly deployed registry as the active registry on ``web3.pm.registry``.
To tie your registry to an ENS name, use web3's ENS module, ie.
.. code-block:: python
w3.ens.setup_address(ens_name, w3.pm.registry.address)
"""
self.registry = SimpleRegistry.deploy_new_instance(self.web3)
return to_checksum_address(self.registry.address)
def release_package(
self, package_name: str, version: str, manifest_uri: URI
) -> bytes:
"""
Returns the release id generated by releasing a package on the current registry.
Requires ``web3.PM`` to have a registry set. Requires ``web3.eth.default_account``
to be the registry owner.
* Parameters:
* ``package_name``: Must be a valid package name, matching the given manifest.
* ``version``: Must be a valid package version, matching the given manifest.
* ``manifest_uri``: Must be a valid content-addressed URI. Currently, only IPFS
and Github content-addressed URIs are supported.
"""
validate_is_supported_manifest_uri(manifest_uri)
raw_manifest = to_text(resolve_uri_contents(manifest_uri))
validate_raw_manifest_format(raw_manifest)
manifest = json.loads(raw_manifest)
validate_manifest_against_schema(manifest)
if package_name != manifest["name"]:
raise ManifestValidationError(
f"Provided package name: {package_name} does not match the package name "
f"found in the manifest: {manifest['name']}."
)
if version != manifest["version"]:
raise ManifestValidationError(
f"Provided package version: {version} does not match the package version "
f"found in the manifest: {manifest['version']}."
)
self._validate_set_registry()
return self.registry._release(package_name, version, manifest_uri)
@to_tuple
def get_all_package_names(self) -> Iterable[str]:
"""
Returns a tuple containing all the package names available on the current registry.
"""
self._validate_set_registry()
package_ids = self.registry._get_all_package_ids()
for package_id in package_ids:
yield self.registry._get_package_name(package_id)
def get_package_count(self) -> int:
"""
Returns the number of packages available on the current registry.
"""
self._validate_set_registry()
return self.registry._num_package_ids()
def get_release_count(self, package_name: str) -> int:
"""
Returns the number of releases of the given package name available on the current registry.
"""
validate_package_name(package_name)
self._validate_set_registry()
return self.registry._num_release_ids(package_name)
def get_release_id(self, package_name: str, version: str) -> bytes:
"""
Returns the 32 byte identifier of a release for the given package name and version,
if they are available on the current registry.
"""
validate_package_name(package_name)
validate_package_version(version)
self._validate_set_registry()
return self.registry._get_release_id(package_name, version)
@to_tuple
def get_all_package_releases(self, package_name: str) -> Iterable[Tuple[str, str]]:
"""
        Returns a tuple of release data (version, manifest_uri) for every release of the
given package name available on the current registry.
"""
validate_package_name(package_name)
self._validate_set_registry()
release_ids = self.registry._get_all_release_ids(package_name)
for release_id in release_ids:
release_data = self.registry._get_release_data(release_id)
yield (release_data.version, release_data.manifest_uri)
def get_release_id_data(self, release_id: bytes) -> ReleaseData:
"""
Returns ``(package_name, version, manifest_uri)`` associated with the given
release id, *if* it is available on the current registry.
* Parameters:
* ``release_id``: 32 byte release identifier
"""
self._validate_set_registry()
return self.registry._get_release_data(release_id)
def get_release_data(self, package_name: str, version: str) -> ReleaseData:
"""
Returns ``(package_name, version, manifest_uri)`` associated with the given
package name and version, *if* they are published to the currently set registry.
* Parameters:
* ``name``: Must be a valid package name.
* ``version``: Must be a valid package version.
"""
validate_package_name(package_name)
validate_package_version(version)
self._validate_set_registry()
release_id = self.registry._get_release_id(package_name, version)
return self.get_release_id_data(release_id)
def get_package(self, package_name: str, version: str) -> Package:
"""
Returns a ``Package`` instance, generated by the ``manifest_uri`` associated with the
given package name and version, if they are published to the currently set registry.
* Parameters:
* ``name``: Must be a valid package name.
* ``version``: Must be a valid package version.
"""
validate_package_name(package_name)
validate_package_version(version)
self._validate_set_registry()
release_data = self.get_release_data(package_name, version)
return self.get_package_from_uri(URI(release_data.manifest_uri))
def _validate_set_registry(self) -> None:
try:
self.registry
except AttributeError:
raise PMError(
"web3.pm does not have a set registry. "
"Please set registry with either: "
"web3.pm.set_registry(address) or "
"web3.pm.deploy_and_set_registry()"
)
if not isinstance(self.registry, ERC1319Registry):
raise PMError(
"web3.pm requires an instance of a subclass of ERC1319Registry "
"to be set as the web3.pm.registry attribute. Instead found: "
f"{type(self.registry)}."
)
def _validate_set_ens(self) -> None:
if not self.web3:
raise InvalidAddress(
"Could not look up ENS address because no web3 " "connection available"
)
elif not self.web3.ens:
raise InvalidAddress(
"Could not look up ENS address because web3.ens is " "set to None"
)
def get_simple_registry_manifest() -> Dict[str, Any]:
return json.loads((ASSETS_DIR / "simple-registry" / "v3.json").read_text())
def validate_is_supported_manifest_uri(uri: URI) -> None:
if not is_supported_content_addressed_uri(uri):
raise ManifestValidationError(
f"URI: {uri} is not a valid content-addressed URI. "
"Currently only IPFS and Github content-addressed URIs are supported."
)
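
# A minimal usage sketch, mirroring the module comment near the top (the
# registry name is illustrative, not a real deployment):
# >>> w3.enable_unstable_package_management_api()
# >>> w3.pm.set_registry('packages.example.eth')
# >>> w3.pm.get_all_package_names()
# ('package-a', 'package-b')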
|
[
"35116541+Ruben170@users.noreply.github.com"
] |
35116541+Ruben170@users.noreply.github.com
|
f60622ab5bd5f34311c951a2a60f776f25a2aa47
|
33a50bb13812090a36257078522b798762978c66
|
/top/api/rest/SimbaNonsearchAllplacesGetRequest.py
|
6f07639d12207a871566ac06ac186d09de431e25
|
[] |
no_license
|
aa3632840/quanlin
|
52ac862073608cd5b977769c14a7f6dcfb556678
|
2890d35fa87367d77e295009f2d911d4b9b56761
|
refs/heads/master
| 2021-01-10T22:05:14.076949
| 2014-10-25T02:28:15
| 2014-10-25T02:28:15
| 23,178,087
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
'''
Created by auto_sdk on 2014-09-08 16:48:02
'''
from top.api.base import RestApi
class SimbaNonsearchAllplacesGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
def getapiname(self):
return 'taobao.simba.nonsearch.allplaces.get'
|
[
"262708239@qq.com"
] |
262708239@qq.com
|
a345c0e0388b1fd434e0a6b9380ebd6b966faf03
|
4406c089d0afde7ffee340ad002c5786a18131a8
|
/ubuntu/dup_free_encode/user_db_util.py
|
8e2daf0be474c8ff021be470d90486f2a64dced4
|
[] |
no_license
|
Sanctorum003/LotApp
|
4772420d6ea3a26f43c4ab58b48b088829bd5352
|
69991cda1223daff642ec2ae6e3c03be66125416
|
refs/heads/master
| 2020-03-13T05:02:55.864020
| 2018-04-25T08:24:14
| 2018-04-25T08:24:14
| 130,974,758
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,700
|
py
|
# Operations on the user_info.db database
import sqlite3 as sq
import os
import json
from ucs_db_util import search_ucs_openId_to_carID
import Time_utils as time_tool
user_db_name = 'user_info.db'
if not os.path.exists(user_db_name):
    flag = 0  # database file does not exist yet
else:
    flag = 1
try:
conn = sq.connect(user_db_name)
cur = conn.cursor()
except Exception as err:
print(err)
if flag == 0:
    # cur_state => 0: done and message sent, 1: in progress, 2: done but message not yet sent
sql = """
CREATE TABLE user_info(
nickName text,
avatarUrl text,
gender text,
city text,
province text,
country text,
language text,
openId text,
balance double,
start_time text,
cur_state int,
Primary Key(openId)
)
"""
cur.execute(sql)
# Close this database
def close_user_db():
cur.close()
conn.close()
print('user_Info database has closed successfully .')
# Insert a new record
def insert_user_data(data,balance,st='-',cur_state=0):
    if not data:
        return False
try:
cur.execute('insert into user_info(nickName,avatarUrl,gender,city,province,country,language,openId,balance,start_time,cur_state) \
values("{0}","{1}","{2}","{3}","{4}","{5}","{6}","{7}","{8}","{9}","{10}")'.format( \
data['nickName'],data['avatarUrl'],data['gender'],data['city'],data['province'],data['country'],data['language'],data['openId'],balance,st,cur_state))
conn.commit()
except Exception as err:
print(err)
return False
return True
# Check whether the given user exists
def query_user_existed(openId):
cur.execute('select openId from user_info where openId = "{0}" '.format(openId))
res = cur.fetchone()
#res: None or ('oUkv30GoL4y0lj6jUBNRK3AzJ-Yc',)
if not res:
return False
return True
# Update the given attribute of the given user
def update_user_data(openId,key,value):
try:
res = cur.execute('update user_info set '+key+'= "{0}" where openId = "{1}" '\
.format(value,openId))
except Exception as err:
print(err)
return False
#print('response:',res)
#response: <sqlite3.Cursor object at 0x7f215debdb20>
conn.commit()
return True
# Extract all of the user's info from the db and return it as a dict
def get_user_info_from_db(openId):
res_to_wx ={}
db_attributes_list = ['nickName','avatarUrl','gender','city','province','country','language','openId','balance','start_time','cur_state']
cur.execute('select * from user_info where openId = "{0}"'.format(openId))
userInfo_query_result = cur.fetchone()
assert( len(db_attributes_list) == len(userInfo_query_result) )
dic_list = zip(db_attributes_list, userInfo_query_result)
for key,value in dic_list:
res_to_wx[key] = value
    # add the carID
res_to_wx['carID'] = search_ucs_openId_to_carID(openId,'carID')
last_time = -1
    # a carID was found
if res_to_wx['carID'] :
cur_Time = time_tool.get_Current_Time()
last_time = time_tool.time_subtract(res_to_wx['start_time'], cur_Time)
res_to_wx['last_time'] = last_time
res_to_wx['header']='user_Info'
return res_to_wx
# Get a single attribute of the given user
def search_from_user_db(openId,key):
cur.execute('select '+key+' from user_info where openId = "{0}"'.format(openId))
res = cur.fetchone()
if not res:
return None
return res[0]
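
# Usage sketch (the openId echoes the example above; profile_dict stands for
# a hypothetical dict with the nickName/avatarUrl/.../openId keys):
# if not query_user_existed('oUkv30GoL4y0lj6jUBNRK3AzJ-Yc'):
#     insert_user_data(profile_dict, 0.0)
# info = get_user_info_from_db('oUkv30GoL4y0lj6jUBNRK3AzJ-Yc')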
|
[
"lushuchengsky@126.com"
] |
lushuchengsky@126.com
|
05d8af9bcacd6c3653138d5f6101b153625fb68c
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Docs/Controlling the Keyboard and Mouse/typer/docs_src/commands/callback/tutorial001.py
|
5438b3ead7b25f5be9db58189b57b8e6e3f410a3
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f0921ed2e19ca15b1c6e7817d91c7dbefd69a5a39c79520e0d5e519b11cdd10c
size 756
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
866d720954dc7618d46066cfecbafeaeda55048b
|
8e9f19bf81adc3954b6c723c6a976335b53533a6
|
/devtools/gha/get_base_and_image_tags.py
|
4b301d955c7b22ca7d5541bc1d9fbc5600a46756
|
[
"MIT"
] |
permissive
|
evalf/nutils
|
e7f4c3104004fbc14762447c4ff56b60de9dff03
|
f28fb337839644eaf3285da99e8a89d1256126f5
|
refs/heads/master
| 2023-08-16T16:13:36.568328
| 2023-08-15T12:28:54
| 2023-08-15T12:29:20
| 13,886,623
| 54
| 30
|
MIT
| 2023-09-07T10:32:10
| 2013-10-26T16:17:57
|
Python
|
UTF-8
|
Python
| false
| false
| 776
|
py
|
import os
import argparse
from .. import log
from ..container import get_container_tag_from_ref
argparse.ArgumentParser().parse_args()
if os.environ.get('GITHUB_EVENT_NAME') == 'pull_request':
ref = os.environ.get('GITHUB_BASE_REF')
if not ref:
raise SystemExit('`GITHUB_BASE_REF` environment variable is empty')
base = '_base_' + get_container_tag_from_ref('refs/heads/' + ref)
if sha := os.environ.get("GITHUB_SHA", ''):
image = '_git_' + sha
else:
image = '_pr'
else:
ref = os.environ.get('GITHUB_REF')
if not ref:
raise SystemExit('`GITHUB_REF` environment variable is empty')
image = get_container_tag_from_ref(ref)
base = '_base_' + image
log.set_output('base', base)
log.set_output('image', image)
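
# Behaviour sketch: for a pull_request event the base tag derives from
# GITHUB_BASE_REF and the image tag from GITHUB_SHA (falling back to '_pr'
# when unset); for any other event both tags derive from GITHUB_REF.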
|
[
"joostvanzwieten@evalf.com"
] |
joostvanzwieten@evalf.com
|
1f81a14852d880d087df7c5068d2148fe40d9429
|
d2f08121142346a1fe823c4e99bb2f04a74023a8
|
/interface/views.py
|
d001443c2f6c0dce56cd580d0a70dc0ab9c190ad
|
[] |
no_license
|
supertopdev/django-table
|
4efb59e5ebda7ac13721ad436f651d0b0a70296d
|
059c768cc1d9ead18f508046cf807d05a2f2ba50
|
refs/heads/master
| 2020-06-26T23:25:27.439663
| 2019-08-02T22:31:50
| 2019-08-02T22:31:50
| 199,784,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,755
|
py
|
from django.shortcuts import render, HttpResponseRedirect
from django.core.paginator import Paginator
from django.db.models import Q
from .models import Centris
from django.http import JsonResponse
from functools import reduce
from operator import __or__ as OR
# Create your views here.
def index(request):
centris = Centris.objects.all()
paginator = Paginator(centris, 15)
page = request.GET.get('page')
centris = paginator.get_page(page)
context = {
'centris': centris
}
return render(request, 'interface/index.html', context=context)
def search(request):
keys = ["pid", "centris_title", "centris_title_business", "centris_title_for", "address", "price", "workscore", "beds_baths", "geo_cordinates", "description",
"agent_name", "built_year", "construction_year", "available_area", "lot_area",
"operation_type",
"fireplace_stove", "additional_features", "potential_gross_revenue", "main_unit", "residential_units",
"unit_number", "parking", "building_area", "use_property", "zoning",
"residential_unit",
"business_type", "intergenerational", "building_style", "pool", "condominium", "gross_area",
"net_area", "property_current_active"]
if request.method == 'POST':
item_list = {}
for key in keys:
item_list[key] = request.POST.getlist(key)
# if (item_list['centris_title'] is []):
# if (item_list['centris_title_business'][0] == "Commercial"):
# centris = Centris.objects.filter(reduce(lambda x, y: x & y, [Q(centris_title__contains='Commercial') | Q(centris_title__contains='Industrial') & Q(centris_title__contains=item_list['centris_title_for'][0])]))
# if item_list:
# request.session['item_list'] = item_list
# paginator = Paginator(centris, 15)
# page = request.GET.get('page')
#
# centris = paginator.get_page(page)
# context = {
# 'centris': centris
# }
# return render(request, 'interface/index.html', context=context)
# else:
# centris = Centris.objects.filter(reduce(lambda x, y: x & y, [~Q(centris_title__contains='Commercial') & ~Q(centris_title__contains='Industrial') & Q(centris_title__contains=item_list['centris_title_for'][0])]))
# if item_list:
# request.session['item_list'] = item_list
# paginator = Paginator(centris, 15)
# page = request.GET.get('page')
#
# centris = paginator.get_page(page)
# context = {
# 'centris': centris
# }
#
# return render(request, 'interface/index.html', context=context)
# else:
        if (item_list['centris_title_business'][0] == "Commercial"):
            # Parenthesized so the OR of the two title categories is applied
            # before intersecting with the "for" filter (otherwise Q's &
            # binds tighter than |).
            centris = Centris.objects.filter(reduce(lambda x, y: x & y, [
                (Q(centris_title__contains='Commercial') | Q(centris_title__contains='Industrial')) & Q(
                    centris_title__contains=item_list['centris_title_for'][0])]))
        else:
            centris = Centris.objects.filter(reduce(lambda x, y: x & y, [
                ~Q(centris_title__contains='Commercial') & ~Q(centris_title__contains='Industrial') & Q(
                    centris_title__contains=item_list['centris_title_for'][0])]))
# centris_res = centris.filter(reduce(OR, [Q(centris_title__contains=title) for title in item_list['centris_title']]))
# if item_list:
# request.session['item_list'] = item_list
# paginator = Paginator(centris_res, 15)
# page = request.GET.get('page')
#
# centris_res = paginator.get_page(page)
# context = {
# 'centris': centris_res
# }
#
# return render(request, 'interface/index.html', context=context)
queries = [Q(pid__contains=pid) for pid in item_list['pid']] + \
[Q(address__contains=address) for address in item_list['address']] + \
[Q(price__contains=price) for price in item_list['price']] + \
[Q(workscore__contains=workscore) for workscore in item_list['workscore']] + \
[Q(beds_baths__contains=beds_baths) for beds_baths in item_list['beds_baths']] + \
[Q(geo_cordinates__contains=geo_cordinates) for geo_cordinates in
item_list['geo_cordinates']] + \
[Q(description__contains=description) for description in item_list['description']] + \
[Q(agent_name__contains=agent_name) for agent_name in item_list['agent_name']] + \
[Q(built_year__contains=built_year) for built_year in item_list['built_year']] + \
[Q(construction_year__contains=construction_year) for construction_year in
item_list['construction_year']] + \
[Q(available_area__contains=available_area) for available_area in
item_list['available_area']] + \
[Q(lot_area__contains=lot_area) for lot_area in item_list['lot_area']] + \
[Q(operation_type__contains=operation_type) for operation_type in
item_list['operation_type']] + \
[Q(fireplace_stove__contains=fireplace_stove) for fireplace_stove in
item_list['fireplace_stove']] + \
[Q(additional_features__contains=additional_features) for additional_features in
item_list['additional_features']] + \
[Q(potential_gross_revenue__contains=potential_gross_revenue) for potential_gross_revenue in
item_list['potential_gross_revenue']] + \
[Q(main_unit__contains=main_unit) for main_unit in item_list['main_unit']] + \
[Q(residential_units__contains=residential_units) for residential_units in
item_list['residential_units']] + \
[Q(unit_number__contains=unit_number) for unit_number in item_list['unit_number']] + \
[Q(parking__contains=parking) for parking in item_list['parking']] + \
[Q(building_area__contains=building_area) for building_area in item_list['building_area']] + \
[Q(use_property__contains=use_property) for use_property in item_list['use_property']] + \
[Q(zoning__contains=zoning) for zoning in item_list['zoning']] + \
[Q(residential_unit__contains=residential_unit) for residential_unit in
item_list['residential_unit']] + \
[Q(business_type__contains=business_type) for business_type in item_list['business_type']] + \
[Q(intergenerational__contains=intergenerational) for intergenerational in
item_list['intergenerational']] + \
[Q(building_style__contains=building_style) for building_style in
item_list['building_style']] + \
[Q(pool__contains=pool) for pool in item_list['pool']] + \
[Q(condominium__contains=condominium) for condominium in item_list['condominium']] + \
[Q(gross_area__contains=gross_area) for gross_area in item_list['gross_area']] + \
[Q(net_area__contains=net_area) for net_area in item_list['net_area']] + \
[Q(property_current_active__contains=property_current_active) for property_current_active in
item_list['property_current_active']]
# query = reduce(lambda x, y: x & y, queries)
        queries_title = [Q(centris_title__contains=title) for title in item_list['centris_title']]
#
        centris_res = centris.filter(reduce(OR, queries_title))
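        # reduce(OR, ...) folds the per-title Q objects into a single OR'd filter.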
if item_list:
request.session['item_list'] = item_list
if len(centris_res) > 1:
            response_str = 'There are {count} centris.'.format(count=len(centris_res))
paginator = Paginator(centris_res, 15)
page = request.GET.get('page')
centris_res = paginator.get_page(page)
context = {
'centris': centris_res
}
return render(request, 'interface/index.html', context=context)
else:
item_list = request.session.get('item_list')
if (item_list['centris_title_business'][0] == "Commercial"):
centris = Centris.objects.filter(reduce(lambda x, y: x & y, [
Q(centris_title__contains='Commercial') | Q(centris_title__contains='Industrial') & Q(
centris_title__contains=item_list['centris_title_for'][0])]))
paginator = Paginator(centris, 15)
page = request.GET.get('page')
centris = paginator.get_page(page)
context = {
'centris': centris
}
return render(request, 'interface/index.html', context=context)
else:
centris = Centris.objects.filter(reduce(lambda x, y: x & y, [
~Q(centris_title__contains='Commercial') & ~Q(centris_title__contains='Industrial') & Q(
centris_title__contains=item_list['centris_title_for'][0])]))
paginator = Paginator(centris, 15)
page = request.GET.get('page')
centris = paginator.get_page(page)
context = {
'centris': centris
}
return render(request, 'interface/index.html', context=context)
        if item_list['centris_title']:
centris_res = centris.filter(
reduce(OR, [Q(centris_title__contains=title) for title in item_list['centris_title']]))
paginator = Paginator(centris_res, 15)
page = request.GET.get('page')
centris_res = paginator.get_page(page)
context = {
'centris': centris_res
}
return render(request, 'interface/index.html', context=context)
queries = [Q(pid__contains=pid) for pid in item_list['pid']] + \
[Q(address__contains=address) for address in item_list['address']] + \
[Q(price__contains=price) for price in item_list['price']] + \
[Q(workscore__contains=workscore) for workscore in item_list['workscore']] + \
[Q(beds_baths__contains=beds_baths) for beds_baths in item_list['beds_baths']] + \
[Q(geo_cordinates__contains=geo_cordinates) for geo_cordinates in
item_list['geo_cordinates']] + \
[Q(description__contains=description) for description in item_list['description']] + \
[Q(agent_name__contains=agent_name) for agent_name in item_list['agent_name']] + \
[Q(built_year__contains=built_year) for built_year in item_list['built_year']] + \
[Q(construction_year__contains=construction_year) for construction_year in
item_list['construction_year']] + \
[Q(available_area__contains=available_area) for available_area in
item_list['available_area']] + \
[Q(lot_area__contains=lot_area) for lot_area in item_list['lot_area']] + \
[Q(operation_type__contains=operation_type) for operation_type in
item_list['operation_type']] + \
[Q(fireplace_stove__contains=fireplace_stove) for fireplace_stove in
item_list['fireplace_stove']] + \
[Q(additional_features__contains=additional_features) for additional_features in
item_list['additional_features']] + \
[Q(potential_gross_revenue__contains=potential_gross_revenue) for potential_gross_revenue in
item_list['potential_gross_revenue']] + \
[Q(main_unit__contains=main_unit) for main_unit in item_list['main_unit']] + \
[Q(residential_units__contains=residential_units) for residential_units in
item_list['residential_units']] + \
[Q(unit_number__contains=unit_number) for unit_number in item_list['unit_number']] + \
[Q(parking__contains=parking) for parking in item_list['parking']] + \
[Q(building_area__contains=building_area) for building_area in item_list['building_area']] + \
[Q(use_property__contains=use_property) for use_property in item_list['use_property']] + \
[Q(zoning__contains=zoning) for zoning in item_list['zoning']] + \
[Q(residential_unit__contains=residential_unit) for residential_unit in
item_list['residential_unit']] + \
[Q(business_type__contains=business_type) for business_type in item_list['business_type']] + \
[Q(intergenerational__contains=intergenerational) for intergenerational in
item_list['intergenerational']] + \
[Q(building_style__contains=building_style) for building_style in
item_list['building_style']] + \
[Q(pool__contains=pool) for pool in item_list['pool']] + \
[Q(condominium__contains=condominium) for condominium in item_list['condominium']] + \
[Q(gross_area__contains=gross_area) for gross_area in item_list['gross_area']] + \
[Q(net_area__contains=net_area) for net_area in item_list['net_area']] + \
[Q(property_current_active__contains=property_current_active) for property_current_active in
item_list['property_current_active']]
queries_title = [Q(centris_title__contains=title) for title in item_list['centris_title']]
# # query = reduce(lambda x, y: x & y, queries)
centris_res = centris.filter(reduce(OR, queries_title))
#
# centris.filter(reduce(lambda x, y: x & y, queries))
paginator = Paginator(centris_res, 15)
page = request.GET.get('page')
centris_res = paginator.get_page(page)
context = {
'centris': centris_res
}
return render(request, 'interface/index.html', context=context)
def delete(request):
id = int(request.GET.get('id'))
centris = Centris.objects.get(pk=id)
centris.delete()
return JsonResponse({'success': 'success'})
|
[
"supertopdev@gmail.com"
] |
supertopdev@gmail.com
|
c7e52c8a090a63d1d7de0a9993f663e993710bb5
|
0b350fe4e426bed69a27fdf8802cdd5632ea3077
|
/WebAnalytics/Growth.py
|
cff0c3f969950166fa170d56ef9cc62e96bba7dd
|
[] |
no_license
|
kevinaloys/Kpython
|
e821cdeffbfcc0fcddfc561cd1aa26e964d154f0
|
49879ccc3cf3311a06181b0e9afe3afaa0f4c081
|
refs/heads/master
| 2021-01-16T19:13:59.426911
| 2014-12-07T01:36:10
| 2014-12-07T01:36:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
#coding: utf-8
# Growth
# Given two integer numbers d1 and d2 representing the unique visitors on a website on the first and second day since launch
# Your task is to
# write a function that prints to the standard output (stdout) the word:
# "Increase" if the number of unique visitors is higher or at least equal with the ones in the first day
# "Decrease" otherwise
# Note that your function will receive the following arguments:
# d1
# which is an integer representing the number of unique visitors for the first day
# d2
# which is an integer representing the number of unique visitors for the second day
# Data constraints
# the integer numbers will be in the [0 .. 1,000,000] range
__author__="Kevin Aloysius"
def check_growth(d1, d2):
    if d1 > d2:
        print("Decrease")
    else:
        print("Increase")
check_growth(4000, 1000)
|
[
"kevinaloys@rocketmail.com"
] |
kevinaloys@rocketmail.com
|
c10fbf1e704a93a27b39f55a903786ffa970dab7
|
f9d942b2fed83e9d6c101ebaedc1d4b36dee2754
|
/logistics/tests/util.py
|
b392e9568f05b3984dcf23beedbd376f6d40b26b
|
[] |
no_license
|
unicefuganda/rapidsms-logistics
|
7cde229ac2619366d253d099c0f222eb96b1468e
|
7d9609a7b9d6fa3f4502aba52ab56acc23a6e928
|
refs/heads/master
| 2020-12-25T05:26:59.459389
| 2012-12-17T12:00:52
| 2012-12-17T12:00:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
from rapidsms.tests.scripted import TestScript
from logistics import loader as logi_loader
def load_test_data():
logi_loader.init_reports()
logi_loader.init_supply_point_types()
logi_loader.init_test_location_and_supplypoints()
logi_loader.init_test_product_and_stock()
|
[
"rluk@dimagi.com"
] |
rluk@dimagi.com
|
1ab83510c1be8eae59245f9899325487caf66b40
|
335feaa2fed94353cad975b3108fc669c4f39102
|
/app/config/files/hearing_loss_v2.pyt
|
8bb78a60a43c38e246a58f041efd94ea0877002a
|
[
"Apache-2.0"
] |
permissive
|
okriuchykhin/anfisa
|
461beff3d4657b2895550811b8858ee27390f8de
|
cda08e649c5a313c7d52f9b4426558c7388a73b0
|
refs/heads/master
| 2020-07-30T11:10:47.503800
| 2019-09-27T04:33:52
| 2019-09-27T04:33:52
| 210,208,650
| 0
| 0
|
Apache-2.0
| 2019-09-22T20:18:27
| 2019-09-22T20:18:27
| null |
UTF-8
|
Python
| false
| false
| 1,111
|
pyt
|
#Exclude variants not in hearing loss panel
if Panels not in {All_Hearing_Loss}:
return False
#Include Present in HGMD as "DM"
if HGMD_Tags in {"DM"}:
return True
# Exclude common variants AF> 5%
if gnomAD_AF >= 0.05:
return False
#Exclude variants farther than 5 bp from the intronic/exonic border
if (not Region in {"exon"}) and Dist_from_Exon >= 6:
return False
#2.a. Include if present in ClinVar as: Path, Likely Path, VUS
# (worst annotation, unless annotated benign by trusted submitter)
if (Clinvar_Benign in {"False"}):
return True
# 2.b. Include All de novo variants
if (Callers in {"BGM_BAYES_DE_NOVO"}):
return True
# 2.c. Include all potential LOF variants
# (stop-codon, frameshift, canonical splice site).
if (Severity >= 3):
return True
# 3.a. Leave only:
# "Missense", "synonymous" and "splice region" variants
if (Severity <= 0):
return False
#3. Include: AF < 0.0007 (GnomAD Overall)
# And: PopMax < 0.01
# (minimum 2000 alleles total in ancestral group)
if (gnomAD_AF <= .0007 and gnomAD_PopMax_AF <= .01):
return True
return False
|
[
"michael.bouzinier@kvitneset.com"
] |
michael.bouzinier@kvitneset.com
|
d5f35a2a1fa8e7be68c9d4402074f58c097b436a
|
a532f7e759ea5a2de40185645857155d8dbca5b3
|
/data_analysis/analyze_response.py
|
111cbd8be0766efb6e3ce71d38b57c5c3fbe0aef
|
[] |
no_license
|
PalaashAgrawal/brainscore_eval
|
cbf7ef6db53421c612fb0401a905c97ac0ae8bfe
|
2589b48f95198a827453e699231581f651a17a60
|
refs/heads/master
| 2023-08-28T03:00:19.689126
| 2021-08-04T22:22:45
| 2021-08-04T22:22:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,997
|
py
|
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from scipy.stats import wilcoxon
import math
import statsmodels.stats.multitest as multitest
# Constants defined here:
category_mapping = {'House': 10, 'Visage': 20, 'Animal': 30, 'Scene': 40, 'Tool': 50, 'Scramble': 90}
comparison_categories_scenes = ['Visage', 'Animal', 'Tool']
comparison_categories_face = ['House', 'Scene', 'Tool']
# 37 and 34 deal with Faces and Scenes respectively in the brain
brodmann_area_mapping = {'V1': 17, 'V2': 18, 'V3': 19, 'V4': 19, 'IT': 20,
'Fusiform Gyrus': 37, 'Parahippocampal Gyrus': 34}
storage_location = Path(
"C:/Users/hsuen/Desktop/bigData/brainscore_img_elec_time_70hz150/")
neural_responses = np.load(storage_location / "neural_responses.npy")
categories = np.load(storage_location / 'stimgroups.npy')
brodmann_areas = np.load(storage_location / "brodmann_areas.npy")
# normalize the activations
# neural_responses = (neural_responses - np.min(neural_responses)) / (np.max(neural_responses) - np.min(neural_responses))
threshold = 1.5
# pass in the brodmann area where you want to get the number of electrodes in
def get_num_electrodes_in_area(area):
brodmann_areas = np.load(storage_location / "brodmann_areas.npy")
area = brodmann_area_mapping[area]
query_area = brodmann_areas == area
return np.sum(query_area)
def get_brodmann_areas():
brodmann_areas = np.load(storage_location / "brodmann_areas.npy")
brodmann_areas = set(brodmann_areas) # gets the unique brodmann areas that are in the electrodes
num_unique = len(brodmann_areas)
return brodmann_areas, num_unique
# categories are: House, Visage, Animal, Scene, Tool, Scramble
# regions: V1, V2, V3, V4, IT, All
# time bin : 1-32, All
def calculate_mean_activation(category, region, time_bin, return_all_electrodes, which_half=None):
if category == 'All':
noise_idx = category_mapping['Scramble']
noise_idx = categories == noise_idx
activation_mean = neural_responses[~noise_idx][:][:]
num_images = np.shape(activation_mean)[0]
if which_half == 'First':
activation_mean = activation_mean[:math.floor(num_images / 2)]
elif which_half == 'Second':
activation_mean = activation_mean[math.floor(num_images / 2):]
activation_mean = np.mean(activation_mean, axis=0) # (11293, 32)
else:
category_num = category_mapping[category]
# CALCULATE THE MEAN
# extract responses for specific category
idx = categories == category_num
activation_mean = neural_responses[idx][:][:] # (images, 11293, 32)
num_images = np.shape(activation_mean)[0]
if which_half == 'First':
activation_mean = activation_mean[:math.floor(num_images / 2)]
elif which_half == 'Second':
activation_mean = activation_mean[math.floor(num_images / 2):]
# compute average for each electrode across all images
activation_mean = np.mean(activation_mean, axis=0) # (11293, 32)
# extract responses for particular electrodes
if region == 'All':
# compute average for all electrodes
if return_all_electrodes:
return activation_mean
activation_mean = np.mean(activation_mean, axis=0) # (32,)
else:
electrode_num = brodmann_area_mapping[region]
idx = brodmann_areas == electrode_num
activation_mean = activation_mean[idx][:]
if return_all_electrodes:
return activation_mean
activation_mean = np.mean(activation_mean, axis=0)
if time_bin == 'All':
return activation_mean
else:
return activation_mean[time_bin]
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.wilcoxon.html
# performs the Wilcoxon signed rank test on the differences of two samples
# Attempts to reject the null hypothesis that the difference between the two samples throughout time
# is zero. Thus, if the percent is less than 5%, we can reject it with 95% confidence
# we also correct the p-values using a multiple tests method:
# https://www.statsmodels.org/dev/generated/statsmodels.stats.multitest.multipletests.html
def perform_sign_test(category, region):
top_electrodes = double_screen_electrodes(region, cutoff=threshold)
category_activations = calculate_mean_activation(category, region, 'All', return_all_electrodes=True)
category_activations = category_activations[top_electrodes]
if category == 'Visage':
comparison_categories = comparison_categories_face
elif category == 'Scene':
comparison_categories = comparison_categories_scenes
elif category == 'All':
comparison_categories = ['Scramble']
activations_other = np.zeros((len(comparison_categories), len(category_activations), 32))
for idx, category in enumerate(comparison_categories):
other_act = calculate_mean_activation(category, region, 'All', return_all_electrodes=True)
activations_other[idx] = other_act[top_electrodes]
activations_other = np.mean(activations_other, axis=0)
p_values = []
for idx in range(32):
t_statistic, p_value = wilcoxon(category_activations[:, idx], activations_other[:, idx])
p_values.append(p_value)
# adjust the p-values here
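    # method='hs' is the Holm-Sidak step-down correction in statsmodels,
    # applied across the 32 per-time-bin p-values.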
    reject, pvals_corrected, _, _ = multitest.multipletests(p_values, alpha=0.5, method='hs')
return pvals_corrected
def scenes_and_faces(region):
    # unlike the IT screening, the baseline here is other object categories, not noise
# get the mean activation values for the category
scene_activations = calculate_mean_activation('Scene', region, 'All', return_all_electrodes=False)
face_activations = calculate_mean_activation('Visage', region, 'All', return_all_electrodes=False)
# calculate the activations for all other categories
num_categories = len(category_mapping)
activations_other_scene = np.zeros((len(comparison_categories_scenes), 32))
activations_other_face = np.zeros((len(comparison_categories_face), 32))
idx = 0
for key in category_mapping:
if key not in comparison_categories_scenes:
continue
activations_other_scene[idx][:] = calculate_mean_activation(key, region, 'All', return_all_electrodes=False)
idx += 1
idx = 0
for key in category_mapping:
if key not in comparison_categories_face:
continue
activations_other_face[idx][:] = calculate_mean_activation(key, region, 'All', return_all_electrodes=False)
idx += 1
# average the activations for all other categories
activations_other_scene = np.mean(activations_other_scene, axis=0)
    activations_other_face = np.mean(activations_other_face, axis=0)
# plot the two differences
factor_increase_other_scene = np.divide(scene_activations, activations_other_scene)
factor_increase_other_face = np.divide(face_activations, activations_other_face)
time_bins = np.arange(0, 1000, 31.25)
plt.figure()
plt.plot(time_bins[:30], factor_increase_other_scene[:30])
plt.plot(time_bins[:30], factor_increase_other_face[:30])
plt.title('Factor Increase for Faces and Scenes in region {0}'.format(region))
plt.xlabel('Time in ms')
ax = plt.gca()
ax.legend(['Scene', 'Face'])
plt.show()
def get_top_electrodes(region, cutoff, which_half):
# look at each electrodes response for the category the region is responsible for
if region == 'Fusiform Gyrus':
activations = calculate_mean_activation('Visage', region, 'All', True, which_half)
num_electrodes = np.shape(activations)[0]
activations_other = np.zeros((len(comparison_categories_face), num_electrodes, 32))
idx = 0
for key in category_mapping:
if key not in comparison_categories_face:
continue
activations_other[idx][:][:] = calculate_mean_activation(key, region, 'All', True, which_half)
idx += 1
activations_other = np.mean(activations_other, axis=0)
elif region == 'Parahippocampal Gyrus':
activations = calculate_mean_activation('Scene', region, 'All', True, which_half)
num_electrodes = np.shape(activations)[0]
activations_other = np.zeros((len(comparison_categories_scenes), num_electrodes, 32))
idx = 0
for key in category_mapping:
if key not in comparison_categories_scenes:
continue
activations_other[idx][:][:] = calculate_mean_activation(key, region, 'All', True, which_half)
idx += 1
activations_other = np.mean(activations_other, axis=0)
elif region == 'IT':
activations = calculate_mean_activation('All', region, 'All', True, which_half)
activations_other = calculate_mean_activation('Scramble', region, 'All', True, which_half)
# at this point, activations is of shape (num_electrodes, 32)
# and activations_other is the exact same shape
# what we want to do is to see how much of an increase there
# is when this happens
# now perform the factor division across all the time steps
#factor_increase = activations / activations_other # (num_electrodes, 32)
factor_increase = (activations - activations_other)/ activations
factor_increase = np.abs(factor_increase)
# now we take the mean factor increase across time
factor_increase = np.mean(factor_increase, axis=1) # (num_electrodes, )
# we can screen out the top electrodes
top_electrodes = factor_increase > cutoff
top_electrodes = [i for i, x in enumerate(top_electrodes) if x]
# now we need to see which one of these will work for the other half of the images
return top_electrodes
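# Split-half screening: an electrode must clear the selectivity cutoff on both
# image halves independently, guarding against selection by chance.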
def double_screen_electrodes(region, cutoff):
first_set = get_top_electrodes(region, cutoff, 'First')
second_set = get_top_electrodes(region, cutoff, 'Second')
return list(set(first_set) & set(second_set))
def plot_top_electrodes(region):
# gives us the indices of the top electrodes for our given region
top_electrodes = double_screen_electrodes(region, cutoff=threshold)
if region == 'Fusiform Gyrus':
activations = calculate_mean_activation('Visage', region, 'All', True)
num_electrodes = np.shape(activations)[0]
activations_other = np.zeros((len(comparison_categories_face), num_electrodes, 32))
idx = 0
for key in category_mapping:
if key not in comparison_categories_face:
continue
activations_other[idx][:][:] = calculate_mean_activation(key, region, 'All', True)
idx += 1
activations_other = np.mean(activations_other, axis=0)
elif region == 'Parahippocampal Gyrus':
activations = calculate_mean_activation('Scene', region, 'All', True)
num_electrodes = np.shape(activations)[0]
activations_other = np.zeros((len(comparison_categories_scenes), num_electrodes, 32))
idx = 0
for key in category_mapping:
if key not in comparison_categories_scenes:
continue
activations_other[idx][:][:] = calculate_mean_activation(key, region, 'All', True)
idx += 1
activations_other = np.mean(activations_other, axis=0)
elif region == 'IT':
activations = calculate_mean_activation('All', region, 'All', True)
activations_other = calculate_mean_activation('Scramble', region, 'All', True)
activations = activations[top_electrodes]
activations_other = activations_other[top_electrodes]
factor_increase = activations / activations_other # (num_electrodes, 32)
# now we take the mean factor increase across time
factor_increase = np.mean(factor_increase, axis=0) # (32, )
# get the average of their responses to a particular category
time_bins = np.arange(0, 1000, 31.25)
plt.figure()
plt.plot(time_bins[:30], factor_increase[:30])
plt.title('Factor Increase for important electrodes in region {0}'.format(region))
plt.xlabel('Time in ms')
plt.show()
if __name__ == '__main__':
# plot_top_electrodes('Parahippocampal Gyrus')
## lets get the number of electrodes in each region ##
areas = ['IT', 'Fusiform Gyrus', 'Parahippocampal Gyrus']
meta_data = np.zeros((3, 3))
for idx, area in enumerate(areas):
meta_data[0, idx] = get_num_electrodes_in_area(area)
meta_data[1, idx] = len(double_screen_electrodes(area, threshold))
# find the number of statistically significant time steps
p_value_face = perform_sign_test('Visage', 'Fusiform Gyrus')
p_value_scene = perform_sign_test('Scene', 'Parahippocampal Gyrus')
p_value_IT = perform_sign_test('All', 'IT')
meta_data[2, 0] = sum(p_value_IT < 0.05)
meta_data[2, 1] = sum(p_value_face < 0.05)
meta_data[2, 2] = sum(p_value_scene < 0.05)
print('here')
|
[
"hermessuen@gmail.com"
] |
hermessuen@gmail.com
|
d3953541a3232bf75dc817c8f0cb769f63101c4e
|
2e2c9bbfac80d32eed54067e5e27b3a13a5a43b6
|
/test2.py
|
7f91d94ee8cb960ae4d3c2f186641463a70b2c86
|
[] |
no_license
|
andreas-chrysanthou/practicals
|
783060c2cb0c068370c9e6ca5f46b26b2a3a7d26
|
90ba01593a0309a179fe9e39c2dada55ec834649
|
refs/heads/master
| 2021-06-30T10:58:32.639729
| 2017-09-13T15:38:08
| 2017-09-13T15:38:08
| 103,274,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def initialBell(x):
    # squared-sine bump on the first half of the unit interval
    return np.where(x % 1. < 0.5, np.power(np.sin(2 * x * np.pi), 2), 0)
nx = 40
c = 0.2
x = np.linspace(0.0, 1.0, nx + 1)
phi = initialBell(x)
phiNew = phi.copy()
phiOld = phi.copy()
# one centred-in-space advection step over the interior points
for j in range(1, nx):
    phi[j] = phiOld[j] - 0.6 * c * (phiOld[j + 1] - phiOld[j - 1])
phi[0]
|
[
"eeac@leeds.ac.uk"
] |
eeac@leeds.ac.uk
|
e348f06c33c9ec3c83ccc95715ffab26b936fbaf
|
ce311b4fcccba0096f08b3988ce303d58ea5cbdd
|
/Lesson 13.py
|
5f3225836f519455e707525c86c4ab6323bf874f
|
[] |
no_license
|
codybaraks/student-data
|
052b56c4874d85238451c56e44133ed5f0fb7535
|
1e5aa8b3c1e793ccd4ec901fa662708966b6fa77
|
refs/heads/master
| 2020-04-06T09:46:10.523028
| 2018-11-16T08:41:43
| 2018-11-16T08:41:43
| 157,355,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
# functions
def talk_to_the_parrot():
print("hello world lets learn python")
talk_to_the_parrot()
def conversation(name,names):
print("Keneth is a good guy and he's ok!")
print("{} sleeps all day and {} works all day".format(name,names))
conversation("KENNETH", "he")
# handling exceptions
try:
count = int(input("Give me a number:"))
except ValueError:
print("Thats not a number")
else:
print(count * "hello world")
# Shopping cart
shopping_cart = []
print("Choose the items you want in the shopping cart")
print("Enter [DONE] once your done!")
while True:
try:
new_item = str(input(" > "))
        if new_item.upper() == 'DONE':
break
# Add new items
shopping_cart.append(new_item)
except:
pass
print("Here's your list")
for item in shopping_cart:
print(item)
print("--------------------")
if len(shopping_cart) > 3:
print(shopping_cart[0], shopping_cart[1])
else:
for item in shopping_cart:
print(item)
|
[
"earvinbaraka@gmail.com"
] |
earvinbaraka@gmail.com
|
1dae13817c7c1db665e73e0220cc2013ab0c0e48
|
cf24bb1895f96a1bbaef9aa1018e48e1b6d02324
|
/fink_voevent/vo_writer_lib.py
|
0f97e85e7f25bb6f19699d7d143511a141d42aa9
|
[
"Apache-2.0"
] |
permissive
|
astrolabsoftware/fink-voevent
|
4f5076d60991cdbc64851221dd67113da5e63d5e
|
b34648bf63b41e8a0e7c78997e2bc53453568f96
|
refs/heads/master
| 2021-07-08T17:10:12.483207
| 2021-03-05T06:42:23
| 2021-03-05T06:42:23
| 228,861,915
| 0
| 0
|
Apache-2.0
| 2021-03-05T06:42:23
| 2019-12-18T14:52:18
| null |
UTF-8
|
Python
| false
| false
| 3,621
|
py
|
#!/usr/bin/env python
# Copyright 2020 AstroLab Software
# Author: Julien Peloton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import string
import doctest
import pyarrow
import pyarrow.parquet as pq
def get_hdfs_connector(host: str, port: int, user: str):
""" Initialise a connector to HDFS
Parameters
----------
host: str
IP address for the host machine
port: int
Port to access HDFS data.
user: str
Username on Hadoop.
Returns
----------
fs: pyarrow.hdfs.HadoopFileSystem
"""
return pyarrow.hdfs.connect(host=host, port=port, user=user)
def write_dataframe(df, outpath: str, fs=None):
""" Write a Pandas DataFrame to HDFS
Parameters
----------
df: Pandas DataFrame
Input Pandas DataFrame containing alert data
outpath: str
Full path (folder+filename) where to store the data.
fs: filesystem, optional
If None (default), assume file-like object. For
HDFS see `pyarrow.hdfs`.
"""
table = pyarrow.Table.from_pandas(df)
pq.write_table(table, outpath, filesystem=fs)
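# Illustrative call (local filesystem; the path and column name are made up):
#   write_dataframe(pd.DataFrame({'objectId': ['ZTF19abc']}), '/tmp/alerts.parquet')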
def check_dir_exist(directory: str, usehdfs: bool) -> bool:
""" Check if `directory` exists.
Note that it also supports hdfs.
Parameters
----------
directory: str
Path to a directory. Can be hdfs:///path/to/somewhere
usehdfs: bool
Set it to True for HDFS file system.
Returns
----------
ok: bool
True if the directory exists, False otherwise
"""
if usehdfs:
cmd = 'hdfs dfs -stat {}'.format(directory)
ok = os.system(cmd) == 0
else:
ok = os.path.exists(directory)
return ok
def string_to_filename(input_string: str, replaceby: str = '_') -> str:
""" Strip weird, confusing or special characters from input_string
Parameters
----------
input_string: str
Input string with any character
replaceby: str
character to use to replace "/" and "\"
Returns
----------
out: str
Sanitized input_string that can safely be used as filename.
Examples
----------
>>> bad_string = "i_am/a_string"
>>> good_string = string_to_filename(bad_string)
>>> print(good_string)
i_am_a_string
"""
# Allow ".", but not as the first character.
if input_string[0] == ".":
input_string = input_string[1:]
# Replace "/" and "\" with `replaceby` for readability.
striped = input_string.replace("/", replaceby).replace("\\", replaceby)
# characters allowed
allowed = string.digits + string.ascii_letters + "_."
return "".join(x for x in striped if x in allowed)
def is_observation(data) -> bool:
""" check if the event is a real observation based on the event role.
Parameters
----------
data: xml
decoded voevent
Returns
----------
out: bool
True if the event role is an observation.
False otherwise (test/utility).
"""
return data.attrib['role'] == 'observation'
if __name__ == "__main__":
sys.exit(doctest.testmod()[0])
|
[
"peloton@lal.in2p3.fr"
] |
peloton@lal.in2p3.fr
|
7a206297e90170f543a02390c8c15dd7bcfee7f0
|
ea93731a3b3b6f57df0516d18566629ef9a058d9
|
/bin/django-admin
|
1c7acf6e153e29af67fa5a7e971a84f246c60fcd
|
[] |
no_license
|
jalanning/djangoTutorial
|
78066ea773212bebfffad0292796e441935b8b1a
|
3e3efac26432871a82e09ba1a2a92806394592d9
|
refs/heads/master
| 2023-08-21T23:39:47.603872
| 2021-10-17T15:08:46
| 2021-10-17T15:08:46
| 417,905,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
#!/Users/jakelanning/Code/tryDjangoEnv/bin/python3.9
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"lanning.jake.w@gmail.com"
] |
lanning.jake.w@gmail.com
|
|
654e1d8bf235ad286ce0ad5feddb15931f30bf0f
|
91efc7cd77f7e5066a341b5986b796588803daa5
|
/python/week3/flights.py
|
4f8a99657903f94854693b3e155392615fc031e8
|
[] |
no_license
|
maysasaad/programmeren
|
c5949947f0faf3f999b188004e1655414d694bf1
|
148206cab74e64d7d9f1e5465cce56f106c54d39
|
refs/heads/master
| 2023-05-05T00:49:28.646189
| 2014-03-02T15:09:41
| 2014-03-02T15:09:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
def report_status(scheduled_time, estimated_time):
""" (number, number) -> str
    Return the flight status ('on time', 'early', 'delayed')
for a flight that was scheduled to arrive at
scheduled_time, but is now estimated to arrive
at estimated_time
Pre-condition: 0.0 <= scheduled_time < 24.0 and
0.0 <= estimated_time < 24.0
>>> report_status(14.3, 14.3)
'on time'
>>> report_status(12.5, 11.5)
'early'
>>> report_status(9.0, 9.5)
'delayed'
"""
if scheduled_time == estimated_time:
return 'on time'
elif scheduled_time > estimated_time:
return 'early'
else:
return 'delayed'
|
[
"rick.peters@me.com"
] |
rick.peters@me.com
|
e842d47c65b49f7baf66ad14c86f7b7c9b1e413b
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/BitPim/rev3177-3237/right-branch-3237/midifile.py
|
2c8d9d84bbc3c8f068fe079043d3fd8f31067e0b
|
[] |
no_license
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902
| 2013-03-28T16:28:47
| 2013-03-28T16:28:47
| 9,080,611
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,795
|
py
|
import common
import fileinfo
module_debug=False
class MIDIEvent(object):
META_EVENT=0
SYSEX_EVENT=1
SYSEX1_EVENT=2
MIDI_EVENT=3
LAST_MIDI_EVENT=4
type_str=('Meta', 'SYSEX', 'SYSEX cont', 'MIDI', 'Last MIDI')
def __init__(self, file, offset, last_cmd=None):
self.__f=file
self.__start=self.__ofs=offset
self.__time_delta=self.__get_var_len()
b=self.__get_int()
if b==0xff:
self.__get_meta_event()
elif b==0xf0 or b==0xf7:
self.__get_sysex_event(b)
else:
self.__get_midi_event(b, last_cmd)
self.__total_len=self.__ofs-self.__start
def __get_int(self):
i=int(self.__f.GetByte(self.__ofs))
self.__ofs+=1
return i
def __get_bytes(self, len):
data=self.__f.GetBytes(self.__ofs, len)
self.__ofs+=len
return data
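    # MIDI delta-times are variable-length quantities: 7 data bits per byte,
    # with the high bit set on every byte except the last (e.g. 0x81 0x48 -> 200).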
def __get_var_len(self):
t=0
b=self.__get_int()
while (b&0x80):
t=(t<<7)|(b&0x7f)
b=self.__get_int()
return (t<<7)|(b&0x7f)
def __get_meta_event(self):
self.__type=self.META_EVENT
self.__cmd=self.__get_int()
self.__len=self.__get_var_len()
if self.__len:
self.__param1=self.__get_bytes(self.__len)
else:
self.__param1=None
self.__param2=None
def __get_sysex_event(self, cmd):
if cmd==0xf0:
self.__type=self.SYSEX_EVENT
else:
self.__type=self.SYSEX1_EVENT
self.__cmd=cmd
self.__len=self.__get_var_len()
if self.__len:
self.__param1=self.__get_bytes(self.__len)
else:
self.__param1=None
self.__param2=None
def __get_midi_event(self, cmd, last_cmd):
if cmd&0x80:
i=cmd
self.__type=self.MIDI_EVENT
self.__param1=self.__get_int()
else:
i=last_cmd
self.__type=self.LAST_MIDI_EVENT
self.__param1=cmd
self.__cmd=(i&0xf0)>>4
self.__midi_channel=i&0x0f
if self.__cmd==0x0c or self.__cmd==0x0d:
self.__len=1
self.__param2=None
else:
self.__len=2
self.__param2=self.__get_int()
def __get_type(self):
return self.__type
type=property(fget=__get_type)
def __get_time_delta(self):
return self.__time_delta
time_delta=property(fget=__get_time_delta)
def __get_total_len(self):
return self.__total_len
total_len=property(fget=__get_total_len)
def __get_cmd(self):
return self.__cmd
cmd=property(fget=__get_cmd)
def __get_midi_channel(self):
return self.__midi_channel
midi_channel=property(fget=__get_midi_channel)
def __get_param_len(self):
return self.__len
param_len=property(fget=__get_param_len)
def __get_params(self):
return self.__param1, self.__param2
params=property(fget=__get_params)
def __str__(self):
if self.type==self.MIDI_EVENT or \
self.type==self.LAST_MIDI_EVENT:
return '0x%04x: %s cmd: 0x%x, Channel: %d, Len: %d'%\
(self.time_delta, self.type_str[self.type],
self.cmd, self.midi_channel, self.param_len)
else:
return '0x%04x: %s cmd: 0x%x, Len: %d'%\
(self.time_delta, self.type_str[self.type],
self.cmd, self.param_len)
class MIDITrack(object):
def __init__(self, file, offset):
self.__f=file
self.__ofs=offset
if module_debug:
            print('New Track @ ofs:', offset)
if self.__f.GetBytes(self.__ofs, 4)!='MTrk':
            raise TypeError('not a MIDI track')
self.__len=self.__f.GetMSBUint32(self.__ofs+4)
ofs=self.__ofs+8
ofs_end=ofs+self.__len
last_cmd=None
self.__time_delta=0
self.__mpqn=None
while ofs<ofs_end:
e=MIDIEvent(file, ofs, last_cmd)
if module_debug:
                print(e)
ofs+=e.total_len
self.__time_delta+=e.time_delta
if e.type==e.META_EVENT:
if e.cmd==0x51:
p1, p2=e.params
self.__mpqn=(ord(p1[0])<<16)|(ord(p1[1])<<8)|ord(p1[2])
if e.type==e.MIDI_EVENT or e.type==e.LAST_MIDI_EVENT:
last_cmd=(e.cmd<<4)|e.midi_channel
else:
last_cmd=e.cmd
self.__total_len=ofs-self.__ofs
if module_debug:
            print('self.__ofs', self.__ofs+8, 'self.__len:', self.__len, 'ofs: ', ofs)
            print('time delta:', self.__time_delta, 'MPQN: ', self.__mpqn)
def __get_time_delta(self):
return self.__time_delta
time_delta=property(fget=__get_time_delta)
def __get_total_len(self):
return self.__total_len
total_len=property(fget=__get_total_len)
def __get_mpqn(self):
return self.__mpqn
mpqn=property(fget=__get_mpqn)
class MIDIFile(object):
def __init__(self, file_wraper):
try:
self.__valid=False
self.__file=file_wraper
if self.__file.GetBytes(0, 4)!='MThd' or \
self.__file.GetMSBUint32(4)!=6:
return
self.__valid=True
self.__type=self.__file.GetMSBUint16(8)
self.__num_tracks=self.__file.GetMSBUint16(10)
self.__time_division=self.__file.GetMSBUint16(12)
self.__tracks=[]
self.__mpqn=2000000
file_ofs=14
time_delta=0
for i in range(self.__num_tracks):
trk=MIDITrack(self.__file, file_ofs)
self.__tracks.append(trk)
file_ofs+=trk.total_len
time_delta=max(time_delta, trk.time_delta)
if trk.mpqn is not None:
self.__mpqn=trk.mpqn
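            # seconds = ticks * microseconds-per-quarter-note / ticks-per-quarter-note / 1e6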
self.__duration=(self.__mpqn*time_delta/self.__time_division)/1000000.0
if module_debug:
                print('type:', self.__type)
                print('time division:', self.__time_division)
                print('num of tracks:', self.__num_tracks)
                print('MPQN:', self.__mpqn)
                print('longest time delta: ', time_delta)
                print('duration:', self.__duration)
except:
self.__valid=False
def __get_valid(self):
return self.__valid
valid=property(fget=__get_valid)
def __get_type(self):
return self.__type
type=property(fget=__get_type)
def __get_num_tracks(self):
return self.__num_tracks
num_tracks=property(fget=__get_num_tracks)
def __get_duration(self):
return self.__duration
duration=property(fget=__get_duration)
|
[
"joliebig@fim.uni-passau.de"
] |
joliebig@fim.uni-passau.de
|
33490b42c85fee01be3f6432c411c486ae7157e5
|
aca253ff1a97c96a1a0a9a5802aa623789662bb1
|
/p034/statue_bar.py
|
ebd6c7ce45949c996b6d834401e27a09c8df4f7f
|
[] |
no_license
|
KD-huhu/PyQt5
|
a6128a34b93f6e2da7216d5818f66dc9614216bc
|
1c33a6549c2fcf663168256553d8c24e25d9a69c
|
refs/heads/master
| 2022-07-03T07:37:29.837547
| 2020-05-17T14:54:39
| 2020-05-17T14:54:39
| 261,768,854
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
import sys, math
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class StatusBar(QMainWindow):
def __init__(self):
super(StatusBar, self).__init__()
self.initUI()
def initUI(self):
self.setWindowTitle("状态栏演示")
self.resize(300, 200)
bar = self.menuBar() # 创建菜单对象
file = bar.addMenu("File") # 添加菜单对象
file.addAction("show")
file.triggered.connect(self.processTrigger) # 绑定槽
self.setCentralWidget(QTextEdit())
self.statusBar = QStatusBar() # 创建状态栏对象
self.setStatusBar(self.statusBar) # 设置状态栏
def processTrigger(self, q):
if q.text() == "show":
self.statusBar.showMessage(q.text() + " 菜单被点击了", 5000)
if __name__ == '__main__':
app = QApplication(sys.argv)
main = StatusBar()
main.show()
sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
KD-huhu.noreply@github.com
|
2c2eba017b299584cc34574addc7412cb5c9635b
|
8ed4bf9fbead471c9e5f88e4d18ac432ec3d628b
|
/hackerrank/algorithm/string/gem_stones.py
|
e3ff22b12568fff4bf1fd684c35dd47da7151f2d
|
[] |
no_license
|
hizbul25/programming_problem
|
9bf26e49ed5bb8c9c829d00e765c9401222fb35c
|
2acca363704b993ffe5f6c2b00f81a4f4eca7204
|
refs/heads/master
| 2021-01-10T22:28:26.105787
| 2018-01-21T16:45:45
| 2018-01-21T16:45:45
| 65,394,734
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
#URL: https://www.hackerrank.com/challenges/gem-stones
n = int(input())
all_elem = set(input())
for g in range(n - 1):
all_elem &= set(input())
print(len(all_elem))
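# e.g. stones "abc", "bca", "cab" intersect to {'a', 'b', 'c'}, printing 3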
|
[
"hizbul.ku@gmail.com"
] |
hizbul.ku@gmail.com
|
91bfd646014d5085f0aa71ff651287ec76a6cad7
|
d7a526650915d7bf9484a3258bf3f6db7896a97e
|
/第一天实验/crackRar.py
|
4a4d773afd96359ae850ca59457bc7dc1390591e
|
[] |
no_license
|
lletsGoo/1stDay_-
|
428e8e39106261a466e93e7a65aa588ce9fa9250
|
091f2f2f5bba70659e2b3ac4e0460945b1498886
|
refs/heads/master
| 2021-02-10T01:07:19.439568
| 2020-03-08T00:38:50
| 2020-03-08T00:38:50
| 244,340,780
| 0
| 0
| null | 2020-03-03T02:42:10
| 2020-03-02T10:17:52
|
Python
|
UTF-8
|
Python
| false
| false
| 542
|
py
|
from unrar import rarfile
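# Note: the unrar package is a ctypes wrapper and needs the UnRAR shared
# library installed (its location can be given via UNRAR_LIB_PATH).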
rarpath='D:\大四\大四下\网络攻防\于晗\第一天-生成字典压缩包爆破\password.rar'
pwdspath='D:\大四\大四下\网络攻防\于晗\第一天-生成字典压缩包爆破\password.txt'
pwds=open(pwdspath,'r')
rf = rarfile.RarFile(rarpath,'r')
print("开始爆破")
while not True:
for line in pwds.readlines():
pwd = line.strip('\n')
try:
rf.extractall(pwd=pwd)
print("压缩包的密码是" + pwd)
except:
pass
|
[
"noreply@github.com"
] |
lletsGoo.noreply@github.com
|
c66cbca886fdbd3d6954818d2e7c22c7085392fc
|
aeaaed0146ef5044eff97e23d33e45f1da3e8657
|
/flask_app/models/user.py
|
0959cd5a1c48781c3a60642db83104de5910306e
|
[] |
no_license
|
Bchaseley/stellar
|
eacdda922b2df860c3b0d2692c091365f7c88c76
|
40b475a86c2d31c3e27c28bf25a3d61afcffb2f4
|
refs/heads/master
| 2023-05-10T23:36:40.765167
| 2021-05-27T00:13:16
| 2021-05-27T00:13:16
| 371,162,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
import re
from flask_app import app
from flask_bcrypt import Bcrypt
bcrypt = Bcrypt(app)
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class User:
def __init__(self,data):
self.id = data['id']
self.first_name = data['first_name']
self.email = data['email']
self.password = data['password']
self.created_at = data['created_at']
self.updated_at = data['updated_at']
@classmethod
def add_user(cls, data):
query = 'INSERT INTO users (first_name, email, password, created_at, updated_at) VALUES (%(first_name)s,%(email)s,%(password)s,NOW(),NOW());'
user_id = connectToMySQL('stellar').query_db(query, data)
return user_id
@classmethod
def get_by_email(cls,data):
query = "SELECT * FROM users WHERE email = %(email)s;"
result = connectToMySQL("stellar").query_db(query,data)
if len(result) < 1:
return False
return cls(result[0])
@classmethod
def get_by_id(cls,data):
query = "SELECT * FROM users WHERE id = %(id)s;"
result = connectToMySQL("stellar").query_db(query,data)
if len(result) < 1:
return False
return cls(result[0])
@staticmethod
def validate_user(user):
is_valid = True
if len(user['first_name']) < 3:
flash("First Name must be at least 3 characters.")
is_valid = False
if len(user['password']) < 8:
flash("Password must be at least 8 characters.")
if not EMAIL_REGEX.match(user['email']):
flash("Invalid email address!")
is_valid = False
return is_valid
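# Minimal usage sketch (illustrative values; assumes a Flask request context
# for flash() and the 'stellar' schema above):
#   data = {'first_name': 'Ada', 'email': 'ada@example.com', 'password': 'secret123'}
#   if User.validate_user(data):
#       data['password'] = bcrypt.generate_password_hash(data['password'])
#       User.add_user(data)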
|
[
"bchaseley@gmail.com"
] |
bchaseley@gmail.com
|
9df035da71a8354e73397ed5fd9483a3a837b5d5
|
62e985b6bc2cd04be506c9f4b586f6a0bd5a8b1c
|
/docs/_docs
|
2e46ca4836023863c54a487374eead67897a2d9d
|
[
"MIT"
] |
permissive
|
delfick/nose-focus
|
ece09553d26ce4323e449b5e50f98e63a21d1699
|
89ceae691fabb27c35d4a67f0edf8dec17737f3f
|
refs/heads/master
| 2023-07-10T22:44:29.271678
| 2023-06-23T06:36:00
| 2023-06-23T06:36:00
| 20,155,739
| 0
| 3
| null | 2019-11-06T22:59:43
| 2014-05-25T13:57:39
|
Python
|
UTF-8
|
Python
| false
| false
| 512
|
#!/usr/bin/env python3
from venvstarter import ignite
import runpy
import os
this_dir = os.path.dirname(__file__)
nose_focus_version = runpy.run_path(
os.path.join(this_dir, "..", "nose_focus", "__init__.py")
)["VERSION"]
with open(os.path.join(this_dir, "requirements.txt"), "r") as fle:
deps = [line.strip() for line in fle.readlines() if line.strip()]
deps.append(f"-e file:{this_dir}/..#egg=nose_focus=={nose_focus_version}")
ignite(this_dir, "sphinx-build", deps=deps, min_python_version=3.6)
|
[
"stephen@delfick.com"
] |
stephen@delfick.com
|
|
4818c68b466968e91b3b4627012b7c34057cd693
|
c3cf5e0dc112b9118eee75264d069c0267a8d810
|
/utils/no_segment_processer.py
|
b7ea765b1888edcc097d9a5c3e684dae1a6b0377
|
[] |
no_license
|
chengtbf/ImageCaption
|
e8f724f594ef7122940a6dcb3f40c4def67092b1
|
76c2284d3f05a6c34c16744ac2e1ddc6d0cf83d5
|
refs/heads/master
| 2021-04-12T11:27:41.926871
| 2018-07-02T08:35:30
| 2018-07-02T08:35:30
| 126,583,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,384
|
py
|
def copy_value(list):
ret = []
for e in list:
ret.append(e)
return ret
in_file = 'D:/train.txt'
out_dic_file = 'D:/dictionary.txt'
out_vec_file = 'D:/train_vector.txt'
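# train_vector.txt layout: each block starts with its 1-based group index,
# then one line per sentence of word ids wrapped in <S>(=1) ... </S>(=2).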
sentences = []
content = []
for line in open(in_file):
line = line.split('\n')[0]
if line.isdigit():
if len(content) > 0:
sentences.append(copy_value(content))
content.clear()
else:
content.append(line)
sentences.append(copy_value(content))
dic = {}
dic["<S>"] = 1
dic["</S>"] = 2
dic_size = 3
for st in sentences:
for sent in st:
for i in range(len(sent)):
if sent[i] in dic:
continue
else:
dic[sent[i]] = dic_size
dic_size += 1
dic_size -= 1
#print(dic['大'])
#output map
with open(out_dic_file, 'w') as f:
id2word = {}
for k, v in dic.items():
id2word[v] = k
for i in range(dic_size):
f.write(id2word[i+1] + " " + repr(i+1) + '\n')
#output vector
with open(out_vec_file, 'w') as f:
for index in range(len(sentences)):
f.write(repr(index+1) + "\n")
for sent in sentences[index]:
f.write("1 ")
for i in range(len(sent)):
#print(sent[i])
#print(dic[sent[i]])
f.write(repr(dic[sent[i]]) + " ")
f.write("2\n")
|
[
"619828575@qq.com"
] |
619828575@qq.com
|
a582fa06697213f927e533229de16f0b7239ace6
|
099d1dbc4e40260086939a26a8a7e2084879e31c
|
/truvisory/settings.py
|
730d4002e67146fea2015edd0c0c3611296d4ec5
|
[] |
no_license
|
bhavybhatia/truvisory
|
a198918fa8ccadec87efb200382e7478df8fdeff
|
089d810a4e2b05d380b1d1a37ba5cc7dce1471fe
|
refs/heads/master
| 2020-09-02T04:29:09.867592
| 2019-11-07T06:36:35
| 2019-11-07T06:36:35
| 219,132,029
| 2
| 3
| null | 2019-11-03T00:22:11
| 2019-11-02T09:33:04
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,122
|
py
|
"""
Django settings for truvisory project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mlwx37gqax2e7v*oz=m)9+tku(!e!9oe0g(f!=*&z6$__ilw09'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'invest',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'truvisory.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'truvisory.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"bhatia.bhavy@gmail.com"
] |
bhatia.bhavy@gmail.com
|
00c870418471d3cbd644cbea12a5c9bdb8ff5530
|
f8a114b410803515195a156bc678d76a990d1f83
|
/config/settings/base.py
|
58f39d4a9fcd2124f896be5e4d4bfe7177a1ba1d
|
[
"MIT"
] |
permissive
|
mahidul-islam/chatbot_cookiecutter
|
86c03941f56e815d4b27fbfec2e7499aa675b065
|
52c8cdf01147b647e076d5dbe5fa1cc13b0bf1ee
|
refs/heads/master
| 2022-12-08T14:20:55.851457
| 2020-08-28T16:27:54
| 2020-08-28T16:27:54
| 291,094,465
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,971
|
py
|
"""
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# chatbot/
APPS_DIR = ROOT_DIR / "chatbot"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "Asia/Dhaka"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
"default": env.db("DATABASE_URL", default="postgres:///chatbot")
}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
]
LOCAL_APPS = [
"chatbot.users.apps.UsersConfig",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "chatbot.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"chatbot.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""zihan""", "mizihan84@gmail.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "chatbot.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "chatbot.users.adapters.SocialAccountAdapter"
# Your stuff...
# ------------------------------------------------------------------------------
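# The context processor referenced in TEMPLATES above could be as small as
# the sketch below (the exact keys it exposes are an assumption):
#
# from django.conf import settings
#
# def settings_context(_request):
#     return {"DEBUG": settings.DEBUG}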
|
[
"mizihan84@gmail.com"
] |
mizihan84@gmail.com
|
36ccee9e76a6a2b3df09ae15e02a7a5dc345c6ac
|
2d29a361bf62a8b7e92fc395385f9508641c07ba
|
/Image_classifier_part2/building_classifier.py
|
5f66c35588838fd1fed889a3777845e9195baa7c
|
[] |
no_license
|
dalpengholic/JCB_for_example
|
4aee5401407e837ff094d1ce2e638fe596583993
|
fd323ff2e88d904ce98ce75b0f473eed466b4d5e
|
refs/heads/master
| 2022-12-06T05:44:22.028163
| 2020-08-25T19:30:26
| 2020-08-25T19:30:26
| 290,304,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
class BuildModel:
    # Default number of classifier input units for each supported model
initial_units = {"vgg16":25088, "vgg13":25088, "densenet121":1024}
def __init__(self, architecture=None, learning_rate=None, hidden_units=None):
# Object : constructor
        # Input : architecture of model, learning_rate, hidden_units
if architecture is None:
self.__architecture = 'vgg16'
else:
self.__architecture = architecture
if learning_rate is None:
self.__learning_rate = 0.002
else :
self.__learning_rate = learning_rate
if hidden_units is None:
self.__hidden_units = 4096
else :
self.__hidden_units = hidden_units
def build_model(self):
# load model from torchvision
from torchvision import datasets, transforms, models
from torch import nn, optim
if self.__architecture == 'vgg16':
model = models.vgg16(pretrained=True)
elif self.__architecture == 'vgg13':
model = models.vgg13(pretrained=True)
elif self.__architecture == 'densenet121':
model = models.densenet121(pretrained=True)
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
# Set a classifier
classifier = nn.Sequential(nn.Linear(BuildModel.initial_units[self.__architecture],self.__hidden_units),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(self.__hidden_units,1024),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(1024,102),
nn.LogSoftmax(dim=1))
model.classifier = classifier
self.__model = model
# Set a criterion and an optimizer
self.__criterion = nn.NLLLoss()
self.__optimizer = optim.Adam(model.classifier.parameters(), lr = self.__learning_rate)
self.__result = [self.__model, self.__criterion, self.__optimizer]
return self.__result
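# Example usage (sketch; assumes torchvision can fetch the pretrained weights):
# model, criterion, optimizer = BuildModel('vgg16', 0.002, 4096).build_model()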
|
[
"dalpengholic@gmail.com"
] |
dalpengholic@gmail.com
|
5f2842d9e47bb5e920657062e1fab24687a4074c
|
78be79b52eb3398f7966e670c47ff04748769d07
|
/common/logger.py
|
71d756fb34bbceef4273196cb918fc435e677e41
|
[] |
no_license
|
SmeagolYe/api_0828
|
83b8f39325942d33a1c9cef29622106c42712956
|
7216f23065218ac112372a35ea53826d9c28921e
|
refs/heads/master
| 2022-12-04T09:49:54.391594
| 2020-08-28T08:18:10
| 2020-08-28T08:18:10
| 290,990,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
import logging
from common.dir_config import *
def get_logger(name):
mylogger = logging.getLogger(name)
mylogger.setLevel("DEBUG")
fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s - [%(filename)s:%(lineno)d]"
formatter = logging.Formatter(fmt=fmt)
console_handler = logging.StreamHandler()
console_handler.setLevel("DEBUG")
console_handler.setFormatter(formatter)
file_handler = logging.FileHandler(logs_dir)
file_handler.setLevel("DEBUG")
file_handler.setFormatter(formatter)
mylogger.addHandler(file_handler)
return mylogger
|
[
"317324406@qq.com"
] |
317324406@qq.com
|
08c45ae4c8f7af1394713a8d8566cf2fc56c4188
|
424cb6a75b5a5d32383f2c03916a5799ab8cc728
|
/Variational-AutoEncoder/utils.py
|
ce977c59bf70efc356bf8ce918b720f9246ab454
|
[] |
no_license
|
itsayushthada/CBIVR
|
eea5cd357ebaef627c3e02c1194cdbf3265a704a
|
325613459e74e22968376ab6b1a9049687769b32
|
refs/heads/master
| 2022-03-25T08:22:23.682241
| 2019-12-02T18:20:10
| 2019-12-02T18:20:10
| 225,437,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,528
|
py
|
import tensorflow as tf
import keras
from keras.layers import Input, Dense, Lambda, InputLayer, Activation, Reshape
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, Deconv2D
from keras.models import Model, Sequential
from keras import backend as K
from keras import metrics
import os
class CelebA:
def __init__(self,
path,
sess,
train=True,
batch_size=32,
height=218,
width=178,
channels=3,
threads=1,
file_type='.jpg'):
image_filenames = [os.path.join(path, img) for img in os.listdir(path) if img.endswith(file_type)]
if train:
image_filenames = image_filenames[:-5000]
else:
image_filenames = image_filenames[-5000:]
input_queue = tf.train.slice_input_producer([image_filenames], shuffle=False)
file_content = tf.read_file(input_queue[0])
image = tf.image.decode_jpeg(file_content, channels=3)
image.set_shape([height, width, channels])
image_cropped = image[45:-45, 25:-25]
image_cropped = tf.image.resize_images(image_cropped, (64, 64))
batch = tf.train.batch([image_cropped],
batch_size=batch_size,
num_threads=threads)
self.batch = tf.cast(batch, tf.float32)/256
self.n_batches = len(image_filenames) // batch_size
self.sess = sess
def __iter__(self):
return self
def __next__(self):
x = self.sess.run(self.batch)
return x, x, None
def next(self):
return self.__next__()
def create_encoder(input_dims,
base_filters=64,
layers=4,
latent=512):
w = input_dims[0]//2**layers
h = input_dims[1]//2**layers
c = base_filters*2**(layers-1)
encoder = Sequential()
encoder.add(InputLayer(input_dims))
for i in range(layers):
        encoder.add(Conv2D(filters=base_filters*2**i,
                           kernel_size=(5, 5),
                           strides=(2, 2),
                           padding='same',
                           use_bias=False))
encoder.add(BatchNormalization(axis=3))
encoder.add(Activation(K.relu))
encoder.add(Reshape([w*h*c]))
encoder.add(Dense(latent*2))
return encoder
def create_decoder(output_dims,
base_filters=64,
layers=4,
latent=512):
w = output_dims[0]//2**layers
h = output_dims[1]//2**layers
c = base_filters*2**(layers-1)
decoder = Sequential()
decoder.add(InputLayer([latent]))
decoder.add(Dense(w*h*c))
decoder.add(Reshape([w, h, c]))
for i in range(layers-1, 0, -1):
        decoder.add(Deconv2D(filters=base_filters*2**i,
                             kernel_size=(5, 5),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False))
decoder.add(BatchNormalization(axis=3))
decoder.add(Activation(K.relu))
decoder.add(Deconv2D(filters=3,
kernel_size=(5, 5),
strides=(2, 2),
padding='same'))
return decoder
def sample(mean_log_var):
mean, log_var = mean_log_var
eps_shape = mean.get_shape()
epsilon = K.random_normal(shape=eps_shape)
z = epsilon*K.exp(log_var/2)+mean
return z
def create_vae(batch_size,
base_filters=64,
latent=8,
image_size=64,
learning_rate=0.001,
reconstruction_weight=1000,
layers=4):
"""
    Constructs VAE model with given parameters.
    :param batch_size: size of a batch (used for placeholder)
    :param base_filters: number of filters after first layer.
        Other layers will double this number
    :param latent: latent space dimension
    :param image_size: size of input image
    :param learning_rate: learning rate for the Adam optimizer
    :param reconstruction_weight: weight of the reconstruction term
        relative to the KL term in the total loss
    :param layers: number of convolution layers in the encoder and decoder
    Returns compiled Keras model along with encoder and decoder
"""
if isinstance(image_size, int):
image_size = (image_size, image_size)
x = Input(batch_shape=(batch_size, image_size[0], image_size[1], 3))
encoder = create_encoder([image_size[0], image_size[1], 3],
base_filters=base_filters,
latent=latent,
layers=layers)
decoder = create_decoder([image_size[0], image_size[1], 3],
base_filters=base_filters,
latent=latent,
layers=layers)
mean_log_var = encoder(x)
mean_size = mean_log_var.shape[1]//2
mean = Lambda(lambda h: h[:, :mean_size])(mean_log_var)
log_var = Lambda(lambda h: h[:, mean_size:])(mean_log_var)
z = Lambda(sample)([mean, log_var])
reconstruction = decoder(z)
loss_reconstruction = K.mean(metrics.mean_squared_error(x, reconstruction))
loss_KL = - K.mean(0.5 * K.sum(1 + log_var - K.square(mean) - K.exp(log_var), axis=1))
loss = reconstruction_weight*loss_reconstruction + loss_KL
vae = Model(x, reconstruction)
vae.compile(optimizer=keras.optimizers.Adam(lr=learning_rate),
loss=lambda x, y: loss)
return vae, encoder, decoder
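# Example usage (sketch; the dataset path is a placeholder and a TF1-style
# session with queue runners is assumed):
# sess = tf.Session()
# K.set_session(sess)
# vae, encoder, decoder = create_vae(batch_size=32, latent=8, image_size=64)
# data = CelebA('/path/to/celeba_images', sess, batch_size=32)
# tf.train.start_queue_runners(sess=sess)
# vae.fit_generator(data, steps_per_epoch=data.n_batches, epochs=1)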
|
[
"noreply@github.com"
] |
itsayushthada.noreply@github.com
|
6d42b2716b1fc97ea8d110a1635b51ce2e6b4156
|
ec648e1db4bdc0227a11685e5e4adf8ea4902d75
|
/mysite/urls.py
|
9b2acaaabd2c637d62ad560d6eed9f5f461e8484
|
[] |
no_license
|
kujosHeist/django-quiz-template
|
9da8697d718b6778cb6d5a147b3b02d37bf82b50
|
a4244413578ce24ce3cda2827930b5c922f2abd9
|
refs/heads/master
| 2021-01-23T06:39:46.908611
| 2017-03-27T23:04:32
| 2017-03-27T23:04:32
| 86,386,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^quiz/', include('quiz.urls', namespace="quiz")),
]
|
[
"shane.carty@hotmail.com"
] |
shane.carty@hotmail.com
|
fcf0dc2303afd6a81cd9c7afcb008b2e92a23305
|
1222bbd7ab34f0f485362164da70847bc476561f
|
/term_1/P2-AIND-Isolation-Adam_Liu/tournament_1.py
|
148cc4e93764e2680760b5ae34914fd3beae4ce1
|
[] |
no_license
|
adamliuio/aind
|
1923cac5dcd24e3307c0682c73b746818312ea44
|
138227f6a6c568c4ddc305f16a6331da80c66c5a
|
refs/heads/master
| 2021-09-08T10:17:11.412747
| 2018-03-06T23:12:01
| 2018-03-06T23:12:01
| 118,387,955
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,803
|
py
|
"""Estimate the strength rating of a student defined heuristic by competing
against fixed-depth minimax and alpha-beta search agents in a round-robin
tournament.
NOTE: All agents are constructed from the student CustomPlayer implementation,
so any errors present in that class will affect the outcome.
The student agent plays a number of "fair" matches against each test agent.
The matches are fair because the board is initialized randomly for both
players, and the players play each match twice -- once as the first player and
once as the second player. Randomizing the openings and switching the player
order corrects for imbalances due to both starting position and initiative.
"""
import itertools
import random
import warnings
from collections import namedtuple
from isolation import Board
from sample_players import (RandomPlayer, open_move_score,
improved_score, center_score)
from game_agent import (MinimaxPlayer, AlphaBetaPlayer, custom_score,
custom_score_2, custom_score_3)
NUM_MATCHES = 5 # number of matches against each opponent
TIME_LIMIT = 150 # number of milliseconds before timeout
DESCRIPTION = """
This script evaluates the performance of the custom_score evaluation
function against a baseline agent using alpha-beta search and iterative
deepening (ID) called `AB_Improved`. The three `AB_Custom` agents use
ID and alpha-beta search with the custom_score functions defined in
game_agent.py.
"""
Agent = namedtuple("Agent", ["player", "name"])
def play_round(cpu_agent, test_agents, win_counts, num_matches):
"""Compare the test agents to the cpu agent in "fair" matches.
"Fair" matches use random starting locations and force the agents to
play as both first and second player to control for advantages resulting
from choosing better opening moves or having first initiative to move.
"""
timeout_count = 0
forfeit_count = 0
for _ in range(num_matches):
games = sum([[Board(cpu_agent.player, agent.player),
Board(agent.player, cpu_agent.player)]
for agent in test_agents], [])
# initialize all games with a random move and response
for _ in range(2):
move = random.choice(games[0].get_legal_moves())
for game in games:
game.apply_move(move)
# play all games and tally the results
for game in games:
winner, _, termination = game.play(time_limit=TIME_LIMIT)
win_counts[winner] += 1
if termination == "timeout":
timeout_count += 1
elif termination == "forfeit":
forfeit_count += 1
return timeout_count, forfeit_count
def update(total_wins, wins):
for player in total_wins:
total_wins[player] += wins[player]
return total_wins
def play_matches(cpu_agents, test_agents, num_matches):
"""Play matches between the test agent and each cpu_agent individually. """
total_wins = {agent.player: 0 for agent in test_agents}
total_timeouts = 0.
total_forfeits = 0.
total_matches = 2 * num_matches * len(cpu_agents)
print("\n{:^9}{:^13}".format("Match #", "Opponent") + ''.join(['{:^13}'.format(x[1].name) for x in enumerate(test_agents)]))
print("{:^9}{:^13} ".format("", "") + ' '.join(['{:^5}| {:^5}'.format("Won", "Lost") for x in enumerate(test_agents)]))
for idx, agent in enumerate(cpu_agents):
wins = {key: 0 for (key, value) in test_agents}
wins[agent.player] = 0
print("{!s:^9}{:^13}".format(idx + 1, agent.name), end="", flush=True)
counts = play_round(agent, test_agents, wins, num_matches)
total_timeouts += counts[0]
total_forfeits += counts[1]
total_wins = update(total_wins, wins)
_total = 2 * num_matches
round_totals = sum([[wins[agent.player], _total - wins[agent.player]]
for agent in test_agents], [])
print(' ' + ' '.join([
'{:^5}| {:^5}'.format(
round_totals[i],round_totals[i+1]
) for i in range(0, len(round_totals), 2)
]))
print("-" * 74)
print('{:^9}{:^13}'.format("", "Win Rate:") +
''.join([
'{:^13}'.format(
"{:.1f}%".format(100 * total_wins[x[1].player] / total_matches)
) for x in enumerate(test_agents)
]))
if total_timeouts:
print(("\nThere were {} timeouts during the tournament -- make sure " +
"your agent handles search timeout correctly, and consider " +
"increasing the timeout margin for your agent.\n").format(
total_timeouts))
if total_forfeits:
print(("\nYour agents forfeited {} games while there were still " +
"legal moves available to play.\n").format(total_forfeits))
def main():
# Define two agents to compare -- these agents will play from the same
# starting position against the same adversaries in the tournament
test_agents = [
Agent(AlphaBetaPlayer(score_fn=improved_score), "AB_Improved"),
Agent(AlphaBetaPlayer(score_fn=custom_score), "AB_Custom"),
Agent(AlphaBetaPlayer(score_fn=custom_score_2), "AB_Custom_2"),
Agent(AlphaBetaPlayer(score_fn=custom_score_3), "AB_Custom_3")
]
# Define a collection of agents to compete against the test agents
cpu_agents = [
# Agent(RandomPlayer(), "Random"),
Agent(MinimaxPlayer(score_fn=open_move_score), "MM_Open"),
# Agent(MinimaxPlayer(score_fn=center_score), "MM_Center"),
# Agent(MinimaxPlayer(score_fn=improved_score), "MM_Improved"),
# Agent(AlphaBetaPlayer(score_fn=open_move_score), "AB_Open"),
# Agent(AlphaBetaPlayer(score_fn=center_score), "AB_Center"),
# Agent(AlphaBetaPlayer(score_fn=improved_score), "AB_Improved")
]
print(DESCRIPTION)
print("{:^74}".format("*************************"))
print("{:^74}".format("Playing Matches"))
print("{:^74}".format("*************************"))
play_matches(cpu_agents, test_agents, NUM_MATCHES)
def calcuTime(start):
period = time() - start
print("Used: %sm %s" % (period//60, period%60))
if __name__ == "__main__":
from time import time
start = time()
main()
print("time spent: %s" % str(time()-start))
|
[
"adamkingleo@gmail.com"
] |
adamkingleo@gmail.com
|
3587499fb12823efd46499611f87c6e9a82c2268
|
9740de6e32e44fe78a812e106833f6c0ca0ce716
|
/main.py
|
2d4bb6b6ad9af84ecbd4e72f94de08b6f4268199
|
[] |
no_license
|
dicksiano/test-autokeras
|
54c8aaf7c16f688e48ad773ab1bbaff26a93a830
|
8e00178435458021d44e9030c1f9642dc2ddd491
|
refs/heads/main
| 2023-01-21T07:19:10.929788
| 2020-12-02T04:51:22
| 2020-12-02T04:51:22
| 317,753,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,085
|
py
|
from config import Constants
from input_reader import read_data
from neural_network import NeuralNetwork
import pickle
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from keras.callbacks import ModelCheckpoint
from keras import backend as K
import itertools
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import Normalizer, MinMaxScaler
#from autokeras import StructuredDataRegressor
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error')
plt.plot(hist['epoch'], hist['mean_absolute_error'], label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'], label = 'Val Error')
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'], label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'], label = 'Val Error')
plt.legend()
plt.show()
def dataset():
with open('meta_cp0.pkl', 'rb') as pkl:
X_train_cp0, y_train_cp0 = pickle.load(pkl)
with open('meta_cpRnd.pkl', 'rb') as pkl:
X_train_cpRnd, y_train_cpRnd = pickle.load(pkl)
print(len(X_train_cp0), len(X_train_cp0[0]), len(X_train_cpRnd), len(X_train_cpRnd[0]))
#print(np.max(y_train_cp0), np.min(y_train_cp0), np.mean(y_train_cp0))
#print(np.max(y_test_cp0), np.min(y_test_cp0), np.mean(y_test_cp0))
#
#print(np.max(y_train_cpRnd), np.min(y_train_cpRnd), np.mean(y_train_cpRnd))
#print(np.max(y_test_cpRnd), np.min(y_test_cpRnd), np.mean(y_test_cpRnd))
assert(len(X_train_cp0) == len(y_train_cp0))
#assert(len(X_test_cp0) == len(y_test_cp0))
assert(len(X_train_cpRnd) == len(y_train_cpRnd))
#assert(len(X_test_cpRnd) == len(y_test_cpRnd))
X_train = list(itertools.chain(X_train_cp0, X_train_cpRnd))
y_train = list(itertools.chain(y_train_cp0, y_train_cpRnd))
#X_test = list(itertools.chain(X_test_cp0, X_test_cpRnd)) #X_test_cp0
#y_test = list(itertools.chain(y_test_cp0, y_test_cpRnd)) #y_test_cp0
print(len(X_train_cp0), len(X_train_cpRnd))
assert( len(X_train) == len(y_train) )
assert( len(X_train) == (len(X_train_cp0)+len(X_train_cpRnd)) )
#assert( len(X_test) == len(y_test) )
#assert( len(X_test) == (len(X_test_cp0)+len(X_test_cpRnd)) )
return np.array(X_train).astype(np.float), np.array(y_train).astype(np.float), np.array(X_train_cp0).astype(np.float), np.array(y_train_cp0).astype(np.float), np.array(X_train_cpRnd).astype(np.float), np.array(y_train_cpRnd).astype(np.float)
def main():
# Setup dataset
X_train, y_train, X_train_cp0, y_train_cp0, X_train_cpRnd, y_train_cpRnd = dataset()
#sc = MinMaxScaler()
#X_train = sc.fit_transform(X_train)
#n = Normalizer()
#X_train = n.fit_transform(X_train)
print(X_train[0])
#mean = X_train.mean(axis=0)
#std = X_train.std(axis=0)
#X_train = (X_train - mean) / std
# define the search
#search = StructuredDataRegressor(max_trials=15, loss='mean_absolute_error')
# perform the search
#search.fit(x=X_train, y=y_train, verbose=1)
# evaluate the model
#mae, _ = search.evaluate(X_train, y_train, verbose=0)
nn = NeuralNetwork(Constants.hyperparameters, X_train_cp0, y_train_cp0)
#K.set_session(tf.get_default_session(), '20201120-052235_0/model')
print( nn.show_configuration() + "_" + str(X_train.shape[0] ) )
#print(np.max(y_train), np.min(y_train), np.mean(y_train))
#print(np.max(y_test), np.min(y_test), np.mean(y_test))
history = nn.train(X_train_cp0, y_train_cp0, X_train_cp0, y_train_cp0) #, X_test, y_test)
print(nn.model.metrics_names, nn.evaluate_model(X_train_cp0, y_train_cp0))
#print(nn.model.metrics_names, nn.evaluate_model(X_test_cp0, y_test_cp0))
print(nn.model.metrics_names, nn.evaluate_model(X_train_cpRnd, y_train_cpRnd))
#print(nn.model.metrics_names, nn.evaluate_model(X_test_cpRnd, y_test_cpRnd))
#plot_history(history)
nn.model.save(nn.show_configuration() + "_" + str(X_train.shape[0]) + "___" + str(len(history.epoch)) )
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
dicksiano.noreply@github.com
|
0c24275613565334a00338ba81c47ae3492726bf
|
af2bcb03b0ca34e376084eb8676666306e2dde99
|
/ext/testlib/result.py
|
38b3322ba180f2f2a71959e9377c871927ef0e1b
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
multifacet/ASAP
|
5c0b26dd2f06cd3c125e809b318c16721720f5e2
|
68cb32c43e3ebad2a5dfb947ce98442375b235c7
|
refs/heads/asap
| 2023-04-11T16:02:42.997035
| 2021-11-30T15:05:22
| 2021-11-30T15:05:22
| 426,381,088
| 4
| 2
|
BSD-3-Clause
| 2022-12-12T23:33:17
| 2021-11-09T20:43:59
|
C++
|
UTF-8
|
Python
| false
| false
| 9,968
|
py
|
# Copyright (c) 2017 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Sean Wilson
import errno
import os
import pickle
import xml.sax.saxutils
from testlib.configuration import config
import testlib.helper as helper
import testlib.state as state
import testlib.log as log
def _create_uid_index(iterable):
index = {}
for item in iterable:
assert item.uid not in index
index[item.uid] = item
return index
class _CommonMetadataMixin:
@property
def name(self):
return self._metadata.name
@property
def uid(self):
return self._metadata.uid
@property
def result(self):
return self._metadata.result
@result.setter
def result(self, result):
self._metadata.result = result
@property
def unsuccessful(self):
return self._metadata.result.value != state.Result.Passed
class InternalTestResult(_CommonMetadataMixin):
def __init__(self, obj, suite, directory):
self._metadata = obj.metadata
self.suite = suite
self.stderr = os.path.join(
InternalSavedResults.output_path(self.uid, suite.uid),
'stderr'
)
self.stdout = os.path.join(
InternalSavedResults.output_path(self.uid, suite.uid),
'stdout'
)
class InternalSuiteResult(_CommonMetadataMixin):
def __init__(self, obj, directory):
self._metadata = obj.metadata
self.directory = directory
self._wrap_tests(obj)
def _wrap_tests(self, obj):
self._tests = [InternalTestResult(test, self, self.directory)
for test in obj]
self._tests_index = _create_uid_index(self._tests)
def get_test(self, uid):
return self._tests_index[uid]
def __iter__(self):
return iter(self._tests)
def get_test_result(self, uid):
return self.get_test(uid)
def aggregate_test_results(self):
results = {}
for test in self:
helper.append_dictlist(results, test.result.value, test)
return results
class InternalLibraryResults(_CommonMetadataMixin):
def __init__(self, obj, directory):
self.directory = directory
self._metadata = obj.metadata
self._wrap_suites(obj)
def __iter__(self):
return iter(self._suites)
def _wrap_suites(self, obj):
self._suites = [InternalSuiteResult(suite, self.directory)
for suite in obj]
self._suites_index = _create_uid_index(self._suites)
    def add_suite(self, suite):
        if suite.uid in self._suites_index:
            raise ValueError('Cannot have duplicate suite UIDs.')
        self._suites.append(suite)
        self._suites_index[suite.uid] = suite
def get_suite_result(self, suite_uid):
return self._suites_index[suite_uid]
def get_test_result(self, test_uid, suite_uid):
return self.get_suite_result(suite_uid).get_test_result(test_uid)
def aggregate_test_results(self):
results = {}
for suite in self._suites:
for test in suite:
helper.append_dictlist(results, test.result.value, test)
return results
class InternalSavedResults:
@staticmethod
def output_path(test_uid, suite_uid, base=None):
'''
        Return the path at which results for a specific test case should
        be stored.
'''
if base is None:
base = config.result_path
return os.path.join(
base,
str(suite_uid).replace(os.path.sep, '-'),
str(test_uid).replace(os.path.sep, '-'))
@staticmethod
def save(results, path, protocol=pickle.HIGHEST_PROTOCOL):
if not os.path.exists(os.path.dirname(path)):
try:
os.makedirs(os.path.dirname(path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(path, 'wb') as f:
pickle.dump(results, f, protocol)
@staticmethod
def load(path):
with open(path, 'rb') as f:
return pickle.load(f)
class XMLElement(object):
def write(self, file_):
self.begin(file_)
self.end(file_)
def begin(self, file_):
file_.write('<')
file_.write(self.name)
for attr in self.attributes:
file_.write(' ')
attr.write(file_)
file_.write('>')
self.body(file_)
def body(self, file_):
for elem in self.elements:
file_.write('\n')
elem.write(file_)
file_.write('\n')
def end(self, file_):
file_.write('</%s>' % self.name)
class XMLAttribute(object):
def __init__(self, name, value):
self.name = name
self.value = value
def write(self, file_):
file_.write('%s=%s' % (self.name,
xml.sax.saxutils.quoteattr(self.value)))
class JUnitTestSuites(XMLElement):
name = 'testsuites'
result_map = {
state.Result.Errored: 'errors',
state.Result.Failed: 'failures',
state.Result.Passed: 'tests'
}
def __init__(self, internal_results):
results = internal_results.aggregate_test_results()
self.attributes = []
for result, tests in results.items():
self.attributes.append(self.result_attribute(result,
str(len(tests))))
self.elements = []
for suite in internal_results:
self.elements.append(JUnitTestSuite(suite))
def result_attribute(self, result, count):
return XMLAttribute(self.result_map[result], count)
class JUnitTestSuite(JUnitTestSuites):
name = 'testsuite'
result_map = {
state.Result.Errored: 'errors',
state.Result.Failed: 'failures',
state.Result.Passed: 'tests',
state.Result.Skipped: 'skipped'
}
def __init__(self, suite_result):
results = suite_result.aggregate_test_results()
self.attributes = [
XMLAttribute('name', suite_result.name)
]
for result, tests in results.items():
self.attributes.append(self.result_attribute(result,
str(len(tests))))
self.elements = []
for test in suite_result:
self.elements.append(JUnitTestCase(test))
def result_attribute(self, result, count):
return XMLAttribute(self.result_map[result], count)
class JUnitTestCase(XMLElement):
name = 'testcase'
def __init__(self, test_result):
self.attributes = [
XMLAttribute('name', test_result.name),
# TODO JUnit expects class of test.. add as test metadata.
XMLAttribute('classname', str(test_result.uid)),
XMLAttribute('status', str(test_result.result)),
]
# TODO JUnit expects a message for the reason a test was
# skipped or errored, save this with the test metadata.
# http://llg.cubic.org/docs/junit/
self.elements = [
LargeFileElement('system-err', test_result.stderr),
LargeFileElement('system-out', test_result.stdout),
]
if str(test_result.result) == 'Failed':
self.elements.append(JUnitFailure('Test failed', 'ERROR'))
class JUnitFailure(XMLElement):
name = 'failure'
def __init__(self, message, fail_type):
self.attributes = [
XMLAttribute('message', message),
XMLAttribute('type', fail_type),
]
self.elements = []
class LargeFileElement(XMLElement):
def __init__(self, name, filename):
self.name = name
self.filename = filename
self.attributes = []
def body(self, file_):
try:
with open(self.filename, 'r') as f:
for line in f:
file_.write(xml.sax.saxutils.escape(line))
except IOError:
# TODO Better error logic, this is sometimes O.K.
# if there was no stdout/stderr captured for the test
#
# TODO If that was the case, the file should still be made and it
# should just be empty instead of not existing.
pass
class JUnitSavedResults:
@staticmethod
def save(results, path):
'''
        Compile the internal results into JUnit format, writing it to the
        given file.
'''
results = JUnitTestSuites(results)
with open(path, 'w') as f:
results.write(f)
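# Example usage (sketch; both paths are placeholders):
# results = InternalSavedResults.load('/tmp/results.pickle')
# JUnitSavedResults.save(results, '/tmp/results.xml')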
|
[
"sujay.yadalam@gmail.com"
] |
sujay.yadalam@gmail.com
|
3726d6aaf7a37aacef520c0ce1b2f43ad68700e1
|
10ad68a2e8e3958106651ff82a0978aabc82d789
|
/agilicus/v1/namespacegenerator/NamespaceGenerator
|
d84616ecfc855a934f851950b5e1c47976aeae4a
|
[
"Apache-2.0"
] |
permissive
|
alimeerutech/kustomize-plugins
|
c69ef749008c42faded39cc87c304c1400418a30
|
21ebdbc6cde5e80d956bc32bbd2ac9c4c75a2441
|
refs/heads/master
| 2022-03-16T16:15:13.204314
| 2019-12-11T21:51:27
| 2019-12-11T21:53:19
| 256,403,523
| 1
| 0
|
Apache-2.0
| 2020-04-17T04:45:57
| 2020-04-17T04:45:56
| null |
UTF-8
|
Python
| false
| false
| 1,939
|
#!/usr/bin/env /usr/bin/python3
import pkg_resources
pkg_resources.require('PyYAML>=5.1.1')
import sys
from itertools import tee
import yaml
import shutil
import os
import subprocess
with open(sys.argv[1], "r") as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print("Error parsing namespace generator input (%s)", file=sys.stderr)
sys.exit(1)
if "addRegistrySecret" in data and data["addRegistrySecret"]:
sops = shutil.which(
"sops",
path="/bin:/usr/bin:/snap/bin:%s/go/bin" % os.path.expanduser("~"),
)
if not sops:
print("Error in PrivateRegistry: sops not on path")
sys.exit(1)
try:
secret_value = (
subprocess.check_output(
[
sops,
"-d",
"--extract",
'["%s"]' % data["input_secret"],
data["input_secret_file"],
],
stderr=subprocess.STDOUT,
)
.strip()
.decode("ascii")
)
except subprocess.CalledProcessError as exc:
print("Is your secret in %s dir?" % os.getcwd(), file=sys.stderr)
print(f"Error calling sops: {exc}", file=sys.stderr)
sys.exit(1)
def gen(ns, labels):
print(
"""---
apiVersion: v1
kind: Namespace
metadata:
name: %s"""
% ns
)
if len(labels):
print(" labels:")
for k in labels.keys():
print(" %s: %s" % (k, labels[k]))
for ns in data["namespaces"]:
gen(ns, data["labels"])
if "addRegistrySecret" in data and data["addRegistrySecret"]:
SECRET = f"""
---
apiVersion: v1
kind: Secret
metadata:
name: regcred
namespace: {ns}
annotations:
kustomize.config.k8s.io/id: 1
type: kubernetes.io/dockerconfigjson
stringData:
.dockerconfigjson: '{secret_value}'
"""
print(SECRET)
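# Example input YAML for this generator (sketch; all values are placeholders):
#
# namespaces: [dev, staging]
# labels:
#   team: platform
# addRegistrySecret: false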
|
[
"don@agilicus.com"
] |
don@agilicus.com
|
|
d4f3bd04ac6fb7adcb70da526c691f7887aa0573
|
1f68b01573ad873231958d98a686b7dbbe303279
|
/searchEngine/App/views.py
|
5873e1fbb65728c4bee308b466490212fb261257
|
[] |
no_license
|
mkotha33/MovieSearchEngine-PythonDjango
|
3a3cb07c1c02a7be9aba1b10dd647b05e2b6729d
|
485a02b33cdbd3c597ac8395855ec4810fd2cdda
|
refs/heads/master
| 2020-04-26T10:03:55.842871
| 2019-03-02T11:45:32
| 2019-03-02T11:45:32
| 173,476,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from . import FileHandler as fh
from . import MyLucene as ML
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
# Create your views here.
def index(request):
'''filepath = "C:/Users/M.S.Shruthi/Desktop/Desktop files/sem_5/InformationRetrieval/project/ir-project/movie"
totalFiles = 100
listOfFiles = []
for i in range(totalFiles):
listOfFiles.append(filepath+str(i+1)+'.txt')
WordsList = fh.ListOfWords(listOfFiles)
WordsList = fh.removeExtraCharacters(WordsList)
UniqueList = fh.getUniqueTerms(WordsList)
StopWordFreeUniqueList, WordsListNew = ML.removeStopWords(WordsList, UniqueList,100)
WordsListNewCopy = WordsListNew
AfterLemmWordsList, AfterLemmUniqueList = ML.MyLemmatizer(WordsListNew,StopWordFreeUniqueList)
FinalWords, FinalUnique = ML.Stemmer(AfterLemmWordsList, AfterLemmUniqueList)
ML.BuildIndex(FinalUnique, FinalWords)'''
x = ['shruthi', 'mahi', 'harshi']
template = loader.get_template("App/index.html")
context = {
'list' : x
}
return HttpResponse(template.render(context,request))
|
[
"maahi.kotha@gmail.com"
] |
maahi.kotha@gmail.com
|
70add22be9a70d8ceca4e71014665764dd5f5aff
|
bc2945c99f828083ca78b3bfcfe220a134fbd8b0
|
/users/migrations/0010_auto_20200725_1159.py
|
c0fe7d7e4a714113bfff57bc26a2b57875bf0f3a
|
[] |
no_license
|
Kyeza/web_system
|
5bde9231551b7a94b535fe707db99ade351bd4fb
|
686a701469b13454d39e4f0c6b342b22befdb345
|
refs/heads/uganda-master
| 2022-12-14T13:43:17.833502
| 2020-12-11T07:23:19
| 2020-12-11T07:23:19
| 176,704,006
| 2
| 1
| null | 2022-12-08T11:07:51
| 2019-03-20T09:55:33
|
Python
|
UTF-8
|
Python
| false
| false
| 612
|
py
|
# Generated by Django 3.0.6 on 2020-07-25 11:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reports', '0014_auto_20200605_0638'),
('users', '0009_auto_20200721_0727'),
]
operations = [
migrations.AlterField(
model_name='payrollprocessors',
name='summary_report',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='earning_or_deduction', to='reports.ExtraSummaryReportInfo'),
),
]
|
[
"kyezaarnold63@gmail.com"
] |
kyezaarnold63@gmail.com
|
eefa361077f4c426bc428b0e32a4e1feaec3b3ae
|
9f69e893fe2d1af30de77e1b28615ec6023b3b6f
|
/sorting/heapsort.py
|
5f303484285aa09670842144e5477528080066e8
|
[] |
no_license
|
PeppyHare/CtCI
|
7317465948e171a0656d962e1d1b916bb04989b7
|
808f0fa96ccbb2be8234b22f38ab63eedda4d0a5
|
refs/heads/master
| 2021-01-18T00:08:34.348970
| 2017-03-14T01:01:09
| 2017-03-14T01:01:09
| 84,258,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
"""
Worst Case: O(n log n)
Best Case: O(n log n)
Average Case: O(n log n)
"""
def heapsort(a):
result = a
heapify(result, len(result))
end = len(result) - 1
while end > 0:
result[end], result[0] = result[0], result[end]
end -= 1
siftDown(result, 0, end)
return result
def heapify(a, count):
start = int((count - 2) / 2)
while start >= 0:
siftDown(a, start, count - 1)
start -= 1
def siftDown(a, start, end):
root = start
while (root * 2 + 1) <= end:
child = root * 2 + 1
swap = root
if a[swap] < a[child]:
swap = child
if (child + 1) <= end and a[swap] < a[child + 1]:
swap = child + 1
if swap != root:
a[root], a[swap] = a[swap], a[root]
root = swap
else:
return
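if __name__ == "__main__":
    # quick self-check; note that heapsort sorts in place (result aliases a)
    print(heapsort([5, 2, 9, 1]))  # [1, 2, 5, 9]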
|
[
"embluhm@us.ibm.com"
] |
embluhm@us.ibm.com
|
64d6e7dc10b061b4daa8d641010581505d2d704a
|
e8ad4cfd04b0c04a811b2a91e88cb62ff4a95aa8
|
/20180527_13_參數、解包、變量.py
|
a08923a6184b3ee99ad9ef18de979b77f584d09b
|
[] |
no_license
|
sugky7302/Code-for-Learning-Python
|
3bdc61956e1041a88b6b18c0d08edb1f62ba33c4
|
129af7bf23f5a0d2676d69693fde29558769bead
|
refs/heads/master
| 2021-07-19T15:08:09.800176
| 2020-04-29T08:04:13
| 2020-04-29T08:04:13
| 144,469,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
from sys import argv
script, first, second, third = argv
print("The script is called:",script)
print("Your first variable is:",first)
print("Your sceond variable is:",second)
print("Your third variable is:",third)
|
[
"sugky7302@gmail.com"
] |
sugky7302@gmail.com
|
14ec29e30beb9428142b51e4d0cb06ebde3e6971
|
a23ec1e8470f87d1b3fa34b01506d6bdd63f6569
|
/algorithms/282. Expression Add Operators.py
|
3f93dd1ffb26e5d220725678cb98469a2ceaaf91
|
[] |
no_license
|
xiaohai0520/Algorithm
|
ae41d2137e085a30b2ac1034b8ea00e6c9de3ef1
|
96945ffadd893c1be60c3bde70e1f1cd51edd834
|
refs/heads/master
| 2023-04-14T17:41:21.918167
| 2021-04-20T13:57:09
| 2021-04-20T13:57:09
| 156,438,761
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
class Solution(object):
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
res, self.target = [], target
for i in range(1,len(num)+1):
if i == 1 or (i > 1 and num[0] != "0"): # prevent "00*" as a number
                self.dfs(num[i:], num[:i], int(num[:i]), int(num[:i]), res)  # seed the search with the first number
return res
def dfs(self, num, temp, cur, last, res):
if not num:
if cur == self.target:
res.append(temp)
return
for i in range(1, len(num)+1):
val = num[:i]
if i == 1 or (i > 1 and num[0] != "0"): # prevent "00*" as a number
self.dfs(num[i:], temp + "+" + val, cur+int(val), int(val), res)
self.dfs(num[i:], temp + "-" + val, cur-int(val), -int(val), res)
self.dfs(num[i:], temp + "*" + val, cur-last+last*int(val), last*int(val), res)
class Solution:
def addOperators(self, num: str, target: int) -> List[str]:
results = []
self.helper(num, 0, target, 0, 0, "", results)
return results
def helper(self, string, start, target, sum_so_far, last, path, results):
if start == len(string) and sum_so_far == target:
results.append(path)
for end in range(start+1, len(string)+1):
sub_string = string[start:end]
if len(sub_string) > 1 and sub_string[0] == '0':
break
cur = int(sub_string)
if start == 0:
self.helper(string, end, target, sum_so_far + cur, cur, path + sub_string, results)
else:
self.helper(string, end, target, sum_so_far + cur, cur, path + "+" + sub_string, results)
self.helper(string, end, target, sum_so_far - cur, -cur, path + "-" + sub_string, results)
self.helper(string, end, target, sum_so_far - last + cur * last, cur * last, path + "*" + sub_string, results)
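# Example (sketch): Solution().addOperators("123", 6) -> ["1+2+3", "1*2*3"]
# (result order may differ; note the second Solution class shadows the first)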
|
[
"noreply@github.com"
] |
xiaohai0520.noreply@github.com
|
b8269d41dafe82ff2472baf61ce5085139da7f96
|
2d95354400adba62d25b6e620d0a6b6254a17270
|
/test_django_APIView/article/migrations/0001_initial.py
|
2fba0b8d98807877a0c7939aca997091f1ade68f
|
[] |
no_license
|
vasyanch/pet_drf
|
b94cc300f3926f7ef38e2501f840ea7b376f3e0a
|
3142aa774e46d53b4809c24e87e6df7d71491f1a
|
refs/heads/master
| 2020-06-08T11:48:49.629280
| 2019-06-22T12:32:39
| 2019-06-22T12:32:39
| 193,223,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,101
|
py
|
# Generated by Django 2.2.2 on 2019-06-21 08:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=120)),
('description', models.TextField()),
('body', models.TextField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='articles', to='article.Author')),
],
),
]
|
[
"vasyanch852@gmail.com"
] |
vasyanch852@gmail.com
|
943bede9e493c57524af600ccc0a7ad2fff6a668
|
0192e1c614ee2853d1f756f94289dcfcb382bbb0
|
/the-dynamo-streamer/python/setup.py
|
a8cf1678b7aa2078e4eb72619b8ed1aea4807d8f
|
[
"MIT"
] |
permissive
|
cloud-architecture/serverless
|
fcb4ef093a61d864f4319f5bc0205af4e5390300
|
d2caa5681f1929523b65b225058229cd298bbc3b
|
refs/heads/master
| 2021-04-19T09:52:53.863381
| 2020-03-22T10:07:16
| 2020-03-22T10:07:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
import setuptools
with open("README.md") as fp:
long_description = fp.read()
setuptools.setup(
name="the_dynamo_streamer",
version="0.0.1",
description="An empty CDK Python app",
long_description=long_description,
long_description_content_type="text/markdown",
author="author",
package_dir={"": "the_dynamo_streamer"},
packages=setuptools.find_packages(where="the_dynamo_streamer"),
install_requires=[
"aws-cdk.core==1.23.0",
"aws-cdk.aws-dynamodb==1.23.0",
"aws-cdk.aws_apigateway==1.23.0",
"aws-cdk.aws-lambda==1.23.0",
"aws-cdk.aws-lambda-event-sources==1.23.0",
"aws-cdk.aws-iam==1.23.0"
],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Code Generators",
"Topic :: Utilities",
"Typing :: Typed",
],
)
|
[
"1432881+nideveloper@users.noreply.github.com"
] |
1432881+nideveloper@users.noreply.github.com
|
b2efbf99e9dd402f90a6e40c9f520aff916354d4
|
e6c7485943b4caf2de39e038380f922726990376
|
/Password generator.py
|
cb4cf93c484116d6a1d993ec866ba7418b5dcb80
|
[] |
no_license
|
akashaw/first
|
a42dd61caac1695e014dd6605c8a06a3a8805b2f
|
eb6c7772e84bf764856c36ead6e8616f69fc95aa
|
refs/heads/master
| 2022-10-12T00:36:22.982206
| 2022-10-05T09:26:06
| 2022-10-05T09:26:06
| 236,743,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
# For each "string:digits" token, output the character at the largest digit
# that is a valid 1-based index into the string ('X' if no digit is valid).
hi=input()
hi=hi.split(',')
ans=[]
for a in hi:
    l = a.split(':')
    string = l[0]
    length = len(string)
    code = list(l[1])
    code.sort()
    # keep only the digits that can index into the string (1-based)
    new_code = []
    for i in code:
        if int(i) <= length:
            new_code.append(i)
    if len(new_code) == 0:
        element = 'X'
    else:
        index = int(new_code[-1]) - 1
        element = string[index]
    ans.append(element)
print(''.join(ans))
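# Example: input "abc:13,xyz:24" prints "cy"
# ("abc": largest valid digit 3 -> 'c'; "xyz": 4 is out of range, so 2 -> 'y')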
|
[
"akash98313@gmail.com"
] |
akash98313@gmail.com
|
69bd617e2910c4a557b82e0697e3ca9ceb6b3480
|
435b0678654de2a0b0b55e25a02b08a79b8a778d
|
/yaoChun/yaoChun/urls.py
|
76fc2c1d357ee82daf1d3ce77fe986bc40f38a9a
|
[] |
no_license
|
yc1220228757/twogit
|
cacc08f5d7d30dd4310708d083343eacec1045d3
|
9ad218b6de23430b68c3ce19612b4e6b3f2cfd70
|
refs/heads/master
| 2020-04-30T21:25:32.933462
| 2019-03-22T07:35:51
| 2019-03-22T07:35:51
| 177,092,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 840
|
py
|
"""yaoChun URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from bookmanager import views
urlpatterns = [
path('admin/', admin.site.urls),
path('bookmanager/',include('bookmanager.urls')),
]
|
[
"yc1220228757@163.com"
] |
yc1220228757@163.com
|
3487f385af9cf1c3384d8a9a9c5360459fd67f89
|
93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3
|
/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py
|
cb485609b55ec330ad7dff0ed4d10d8a13a8f865
|
[
"Apache-2.0"
] |
permissive
|
hutuxian/Paddle
|
f8b7693bccc6d56887164c1de0b6f6e91cffaae8
|
a1b640bc66a5cc9583de503e7406aeba67565e8d
|
refs/heads/develop
| 2023-08-29T19:36:45.382455
| 2020-09-09T09:19:07
| 2020-09-09T09:19:07
| 164,977,763
| 8
| 27
|
Apache-2.0
| 2023-06-16T09:47:39
| 2019-01-10T02:50:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from pass_test import PassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
class FCFusePassTest(PassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[32, 128], dtype="float32", lod_level=0)
tmp_0 = fluid.layers.fc(input=data,
size=128,
num_flatten_dims=1,
act="relu")
tmp_1 = fluid.layers.fc(input=tmp_0, size=32, num_flatten_dims=1)
tmp_2 = fluid.layers.softmax(input=tmp_1)
self.feeds = {"data": np.random.random((32, 128)).astype("float32")}
self.fetch_list = [tmp_0, tmp_1, tmp_2]
self.pass_names = "fc_fuse_pass"
self.fused_op_type = "fc"
self.num_fused_ops = 2
def test_check_output(self):
use_gpu_set = [False]
if core.is_compiled_with_cuda():
use_gpu_set.append(True)
for use_gpu in use_gpu_set:
self.pass_attrs = {"fc_fuse_pass": {"use_gpu": use_gpu}}
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
self.check_output_with_place(place, startup_on_cpu=True)
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
hutuxian.noreply@github.com
|
c6abbd68abb96d60b47108a11a645b653f08f6d2
|
edfd9d720e180465b4775d1d9734cc7b38df9c5e
|
/main.py
|
4d90c65d3a66de046a69ef96393ae4a47da756b6
|
[] |
no_license
|
jonnyktran/photo-pixelator
|
33748e6bbba9c4b987d62116cc5089bb98099c5d
|
2ae83cd1ff9f3083da2c8598f528553425cdee4b
|
refs/heads/main
| 2023-07-18T18:20:07.922311
| 2021-09-19T08:09:27
| 2021-09-19T08:09:27
| 334,803,004
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,201
|
py
|
from PIL import Image
import numpy as np
from numba import njit
RESURRECT64_PALETTE = [(46, 34, 47), # bastille
(62, 53, 70), # ship gray
(98, 85, 101), # salt box
(150, 108, 108), # copper rose
(171, 148, 122), # sandrift
(105, 79, 98), # don juan
(127, 112, 138), # mobster
(155, 171, 178), # hit gray
(199, 220, 208), # paris white
(255, 255, 255), # white
(110, 39, 39), # nutmeg
(179, 56, 49), # well read
(234, 79, 54), # cinnabar
(245, 125, 74), # jaffa
(174, 35, 52), # mexican red
(232, 59, 59), # cinnabar
(251, 107, 29), # orange
(247, 150, 23), # tree poppy
(249, 194, 43), # saffron
(122, 48, 69), # solid pink
(158, 69, 57), # el salva
(205, 104, 61), # raw sienna
(230, 144, 78), # burnt sienna
(251, 185, 84), # saffron mango
(76, 62, 36), # lisbon brown
(103, 102, 51), # costa del sol
(162, 169, 71), # husk
(213, 224, 75), # wattle
(251, 255, 134), # dolly
(22, 90, 76), # green pea
(35, 144, 99), # eucalyptus
(30, 188, 115), # mountain meadow
(145, 219, 105), # pastel green
(205, 223, 108), # yellow green
(49, 54, 56), # outer space
(55, 78, 74), # mineral green
(84, 126, 100), # como
(146, 169, 132), # sage
(178, 186, 144), # swamp green
(11, 94, 101), # deep sea green
(11, 138, 143), # blue chill
(14, 175, 155), # niagara
(48, 225, 185), # turquoise
(143, 248, 226), # aquamarine
(50, 51, 83), # martinique
(72, 74, 119), # east bay
(77, 101, 180), # san marino
(77, 155, 230), # picton blue
(143, 211, 255), # anakiwa
(69, 41, 63), # livid brown
(107, 62, 117), # affair
(144, 94, 169), # wisteria
(168, 132, 243), # portage
(234, 173, 237), # french lilac
(117, 60, 84), # cosmic
(162, 75, 111), # cadillac
(207, 101, 127), # charm
(237, 128, 153), # carissma
(131, 28, 93), # disco
(195, 36, 84), # maroon flush
(240, 79, 120), # french rose
(246, 129, 129), # froly
(252, 167, 144), # mona lisa
(253, 203, 176)] # light apricot
# Convert image into numpy array of RGB values
def load_img(filename):
pil_img = Image.open(filename).convert('RGB')
max_size = (1280, 1280)
pil_img.thumbnail(max_size, Image.LANCZOS)
return np.array(pil_img.getdata(), dtype=np.uint8).reshape(pil_img.height, pil_img.width, 3)
# Assign each pixel average color of its square and return True if no valid pixels
@njit(fastmath=True)
def assign_average_color(square_row, square_col, height, width, pixel_size, img_arr):
row_start, col_start = square_row * pixel_size, square_col * pixel_size
# Compute average RGB value of square
r, g, b, num_pixels = 0, 0, 0, 0
for row in range(pixel_size):
if (row_start + row) >= height:
break
for col in range(pixel_size):
if (col_start + col) >= width:
break
rgb = img_arr[row_start + row][col_start + col]
r += rgb[0]
g += rgb[1]
b += rgb[2]
num_pixels += 1
if num_pixels < 1:
return True
avg_color = (r / num_pixels, g / num_pixels, b / num_pixels)
# Assign average color to all pixels in square
for row in range(pixel_size):
if (row_start + row) >= height:
break
for col in range(pixel_size):
if (col_start + col) >= width:
break
img_arr[row_start + row][col_start + col] = avg_color
# Pixelate image based on given pixel size
def pixelator(file_path, pixel_size):
img_arr = load_img(file_path)
height, width, pixel_size = len(img_arr), len(img_arr[0]), int(pixel_size)
# Create PIL image in mode 'P' using color palette
palette_list = list(sum(RESURRECT64_PALETTE, ()))
palette_img = Image.new('P', (8, 8))
palette_img.putpalette(palette_list)
# Skip extra computation when pixel size is 1
if pixel_size == 1:
pixel_img = Image.fromarray(img_arr)
pixel_img = pixel_img.quantize(palette=palette_img, dither=0)
return pixel_img.convert('RGB')
# Divide image into squares based on pixel size
square_h, square_w = height//pixel_size + 1, width//pixel_size + 1
for square_row in range(square_h):
for square_col in range(square_w):
# Assign each pixel average color of its square
no_valid_pixels = assign_average_color(square_row, square_col, height, width, pixel_size, img_arr)
if no_valid_pixels:
break
# Use PIL quantize to assign each pixel nearest color from palette
pixel_img = Image.fromarray(img_arr)
pixel_img = pixel_img.quantize(palette=palette_img, dither=0)
return pixel_img.convert('RGB')
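# Example usage (sketch; 'photo.jpg' is a placeholder path):
# pixel_img = pixelator('photo.jpg', 8)
# pixel_img.save('photo_pixelated.png')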
|
[
"61168867+jonathanktran@users.noreply.github.com"
] |
61168867+jonathanktran@users.noreply.github.com
|
9006554c4b9386b8a668cade0feaf260ec4e07a4
|
b2ab970816dc7d21ddcd560b90e3096d13307505
|
/0-100/058.py
|
bac114c8f2095dac6aa8cfb39796720dbfa8c564
|
[] |
no_license
|
rillis/ProjectEuler
|
bbdf79ff8878b30a6d6e69738eedc4e99a6295be
|
0dc7fda9fe2f0a4fff44c616ac0369884475c00c
|
refs/heads/master
| 2023-02-09T12:13:56.329545
| 2023-01-29T16:25:30
| 2023-01-29T16:25:30
| 298,775,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 942
|
py
|
# Time to achieve the answer: 1.5049629s
# Notes: Running in PyPy3
#
# ProjectEuler
# Copyright (c) ProjectEuler - rillis. All rights reserved.
#
# https://github.com/rillis/ProjectEuler
#
import math
import os
import random
import re
import sys
import itertools
import timeit
def isPrime(n):
    # assumes n is odd and > 2, which holds for every diagonal value tested here
    for x in range(3, int(n**0.5)+1, 2):
        if n % x == 0: return False
    return True
def solution(n):
diagonais = [1]
pri = []
intervalo = 1
atual = 1
ratio = 1
while ratio > 0.1:
for x in range(4):
atual+=intervalo+1
diagonais.append(atual)
if isPrime(atual): pri.append(atual)
ratio = len(pri)/len(diagonais)
intervalo+=2
return int(atual**0.5)
if __name__ == "__main__":
n=0
start_t = timeit.default_timer() # DEBUG
print(solution(n))
stop_t = timeit.default_timer() # DEBUG
print("TOTAL RUNTIME:", stop_t - start_t) # DEBUG
|
[
"rillisnelson@gmail.com"
] |
rillisnelson@gmail.com
|
0d616abe175f91c7287f6507c0c2342cd65f0b59
|
6b5d8fd840e8821a6fdd16a3d98030fee48a7764
|
/App/migrations/0015_auto_20190714_1759.py
|
8d0dd47381548e28e32e6a593b31d8d8d2f232b0
|
[] |
no_license
|
LGungnir/DjangoZOL
|
059bc69cfa30ac0e5e532def4ea9cf4adf385e39
|
e3aa4e2bb74b3ac6ee8d7fc772bacecb9fa4cf67
|
refs/heads/master
| 2022-12-22T22:33:31.294869
| 2019-07-18T08:11:15
| 2019-07-18T08:11:15
| 197,489,612
| 0
| 0
| null | 2022-12-08T05:53:52
| 2019-07-18T01:37:34
|
HTML
|
UTF-8
|
Python
| false
| false
| 982
|
py
|
# Generated by Django 2.2.3 on 2019-07-14 09:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('App', '0014_ordergoods'),
]
operations = [
migrations.CreateModel(
name='Receive',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('person', models.CharField(max_length=32)),
('phone', models.CharField(max_length=11)),
('address', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='App.User')),
],
),
migrations.AddField(
model_name='order',
name='receive_info',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='App.Receive'),
),
]
|
[
"cheng_yunbin@sina.com"
] |
cheng_yunbin@sina.com
|
27dd856daa70baf155dfbeae5458fba01503ddc8
|
a2fcfef1e6e2eda66964e208480bce9e1509c3bd
|
/my_blog/urls.py
|
377af2835f18a5e637a33c5404e589e7e3a628a3
|
[] |
no_license
|
zcqshine/my_blog_by_django
|
009fd12ee102a7983993d8be8f718a132954a7fc
|
2a9629383221a4e22ae75193cfa1e0b4527afdcd
|
refs/heads/master
| 2016-09-06T10:45:31.911168
| 2015-01-23T09:45:11
| 2015-01-23T09:45:11
| 29,725,086
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'my_blog.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$','article.views.home'),
# url(r'^(?P<my_args>\d+)/$','article.views.detail',name='detail'),
# url(r'^test/$', 'article.views.test')
)
|
[
"zcqshine@gmail.com"
] |
zcqshine@gmail.com
|
7024ae05ff1844eca1d1409094787f626f186dca
|
98ebce5d5fbf3eb36642b3ffefe51ffcb81e9d5a
|
/uv/asgi.py
|
3a64a8aef60b33aa8102ee73af4ce975123cecea
|
[
"CC0-1.0"
] |
permissive
|
codesankalp/E-commerce-Website
|
cf5d07587e790761b07e68b47669f7cf1f25269b
|
9091a7e27a9e63242b9067377a697196879fc707
|
refs/heads/master
| 2023-01-19T23:29:51.071061
| 2020-11-25T07:35:09
| 2020-11-25T07:35:09
| 286,974,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
"""
ASGI config for uv project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uv.settings')
application = get_asgi_application()
|
[
"sankalp123427@gmail.com"
] |
sankalp123427@gmail.com
|
b6bc3862aba3cb915a8c9e6f441026fe7f4447a4
|
f555608150415bd30d452d97621b8675be09046f
|
/contest/sz.py
|
5d683085b0daf32062cb5b4ee092d9d498b6ceb9
|
[] |
no_license
|
luxiao/python
|
b9fcaf4b003cee7d164ce0829939448fe84bbdf6
|
8da50c6f14c97f855f53d526c85329c22fab071f
|
refs/heads/master
| 2020-12-24T16:34:26.599911
| 2019-05-14T08:11:12
| 2019-05-14T08:11:12
| 18,641,100
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,809
|
py
|
# -*- coding: utf-8 -*-
import types
opr=('+','-','*','/','(',')')
class stack:
def __init__(self,srclist=None):
self.data=[]
self.leng=0
if srclist is not None:
self.data=srclist
self.leng=len(srclist)
    def isEmp(self):
        if self.leng==0:
            return True
        else:
            return False
    def pop(self):
        tmp=None
        if not self.isEmp():
            self.leng-=1
            tmp=self.data[self.leng]
            self.data=self.data[:-1]
        else:
            print 'Stack is Empty'
        return tmp
def push(self,x):
self.leng+=1
self.data.append(x)
#print self.data
def calc(exp):
tmp=''
inlist=[]
for i in exp:
if i in opr:
tmp=tmp+' '+i+' '
else:
tmp=tmp+i
for i in tmp.split():
if i in opr:
inlist.append(i)
else:
inlist.append(float(i))
result=str(toNpl(inlist)).strip().replace('[','').replace(']','').replace("'",'')
#print result
tmp=[]
for x in result.split(', '):
if type(x)==types.ListType:
tmp.extend(x)
else:
tmp.append(x)
#print tmp
s=stack()
for i in tmp:
if i not in opr:
s.push(float(i))
else:
x=s.pop()
y=s.pop()
s.push(doit(y,x,i))
#print 'the Result of is '+str(s.pop())
return s.pop()
def doit(x,y,op):
if op=='+':
r=x+y
elif op=='-':
r=x-y
elif op=='*':
r=x*y
elif op=='/':
r=x/y
else:
r=None
print 'Unknown operator!'
return r
def toNpl(explist):
"""中缀表达式处理成逆波兰表达式"""
kh=('(',')')
result=[]
#eliminate brackets
c=explist.count(kh[0])
if c>0:
i=explist.index(kh[0])
result=explist[:i]
subl=explist[i+1:]
j=1
for e in xrange(len(subl)):
if j>0:
if subl[e]==kh[0]:
j+=1
elif subl[e]==kh[1]:
j-=1
if j==0:
break
tmp=[]
tmp=toNpl(subl[:e])
if e<len(subl)-1:
result.append(tmp)
result.extend(subl[e+1:])
return toNpl(result)
result.append(tmp)
result=parse(result)
else:
result=parse(explist)
return result
def parse(explist):
xc=('*','/')
result=[]
i=0
while(i<len(explist)):
if explist[i] in xc:
tmp=(type(result[-1])==types.ListType and [x for x in result[-1]] or [result[-1]]) +[explist[i+1],explist[i]]
result=result[:-1]
result.append(tmp)
i+=1
else:
result.append(explist[i])
i+=1
i=1
while(i<len(result)):
result[i],result[i+1]=result[i+1],result[i]
i+=2
return result
def test():
for e in ['1+(2-3*(4/5))-6+7/4*(8-9)','1-(2-3)-4','1*(2-(0+4)/5)','((128+35)*3)','4000/((25-13)*12)','89*(((2+86)))']:
print calc(e),float(eval(e)),int(calc(e))==eval(e)
if __name__=='__main__':
test()
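# Usage sketch beyond test(): calc() accepts an infix expression string and
# returns a float after the RPN conversion, e.g. calc('1+2*3') yields 7.0.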
|
[
"luxiao1223@gmail.com"
] |
luxiao1223@gmail.com
|
114d19bb3eed178fe475e6e9b4c627a69fd2f3e1
|
2beaf81f068217ac74ca616ec7a2e24a4c4597d8
|
/codechef/choprt.py
|
63538b87704f4a29286a6cfa52948665160f5420
|
[] |
no_license
|
CodeForFun-r/Competitive-Programming
|
03b0751e5c00b0c979d413c934a881dde15caedc
|
c93d6e7f6db964bc9c8b54caab535c2b52d6ffec
|
refs/heads/master
| 2022-12-15T11:41:36.352931
| 2020-09-02T10:52:37
| 2020-09-02T10:52:37
| 285,487,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
for i in range(int(input())):
a, b = map(int, input().split(' '))
if a > b:
print('>')
elif a < b:
print('<')
else:
print('=')
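# Expected I/O sketch (inferred from the loop above): the first stdin line is
# the number of test cases, followed by one "a b" pair per line; the program
# prints '>', '<' or '=' for each pair.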
|
[
"noreply@github.com"
] |
CodeForFun-r.noreply@github.com
|
8cb2231586c50d4a9c40daaa55cfe424fba86cc0
|
a994be46a4deef9e7d7aaea32d1714b6d849344c
|
/data/generate_train.py
|
5f8d6e9fac76128c688581eef7c112d73005aa43
|
[] |
no_license
|
lion-ops/slim_yolov3
|
7abec2242a491c5af754e9a5d4c96cacf4ef74ba
|
fb4d4327d7bb4433d2e7824d4e83c53ce3403c60
|
refs/heads/master
| 2023-07-19T11:22:13.251017
| 2020-09-01T07:10:44
| 2020-09-01T07:10:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
import os
images_path = os.listdir('images')
with open('train.txt', 'w+') as f:
for image_path in images_path:
if not image_path.startswith('COCO_val2017'):
f.write('data/images/'+image_path+'\n')
|
[
"noreply@github.com"
] |
lion-ops.noreply@github.com
|
577029548b9dd4fdd6fbd9bce95f20f25c58623e
|
aaf596efff64bf626b9dc2b7279ceb195b2d8eea
|
/pylearnadvance/thread/demo1.py
|
e6d05888ab3e54386bba8c79d3cb053a1202c0fb
|
[] |
no_license
|
gongyangyu/pythonprojects
|
4a8d48453f606f8ca87d3860f8c98104596e456d
|
03ffd8932f23bd98538b8e88c9c4567852ec030f
|
refs/heads/master
| 2020-07-12T03:43:38.215350
| 2019-09-16T13:31:26
| 2019-09-16T13:31:26
| 204,709,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
import threading
import time
def sing():
"""唱歌5s"""
for i in range(5):
print("----------正在唱歌----------")
time.sleep(1)
def dance():
"""跳舞5s"""
for i in range(5):
print("----------正在跳舞----------")
time.sleep(1)
def main():
t1=threading.Thread(target=sing)
t2=threading.Thread(target=dance)
t1.start()
t2.start()
if __name__ == "__main__":
main()
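# Note: these are non-daemon threads, so the process exits only after both
# finish. To wait for them explicitly, one could add after the start() calls:
#     t1.join()
#     t2.join()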
|
[
"1665465903@qq.com"
] |
1665465903@qq.com
|
bf722e10a81a9c4b67683f0102f4ca1a2cf02a31
|
710e46cdf24c163503d5d9db25ff7b6f428a4e8e
|
/__init__.py
|
f717c2f41b445ac739959cc52dd1236f7c7f1769
|
[] |
no_license
|
lloyd10029523/mapillary
|
e81ed370337d85706cbdfd7a06671cbe79ec7d80
|
841173a8240d4a41dbfb03d01ea03eb55e75b1cc
|
refs/heads/master
| 2021-01-22T22:02:50.948938
| 2016-09-22T13:59:20
| 2016-09-22T13:59:20
| 92,751,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
mapillary
A QGIS plugin
mapillary
-------------------
begin : 2015-01-20
copyright : (C) 2015 by geodrinx
email : geodrinx@gmail.com
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load mapillary class from file mapillary.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .mapillary import mapillary
return mapillary(iface)
|
[
"geodrinx@gmail.com"
] |
geodrinx@gmail.com
|
699ce16dc10d9a10a12ff340c9bb8f01c5096fbf
|
0e3992c909ab923bf3897a4568d467bef73c0909
|
/peacenik-exp-framework/src/option/benchmarks.py
|
096ad8ccf9c8cdb967857f10d5398ca5130470d9
|
[] |
no_license
|
PLaSSticity/peacenik-simulators-asplos20
|
c4c3b5302e3aa6d3063c35ccdc3f95424e477c72
|
66d187650daf1aedb5c76fefee6d20529a56e27a
|
refs/heads/master
| 2020-12-10T19:50:38.369905
| 2020-02-04T12:25:54
| 2020-02-04T12:25:54
| 233,693,358
| 0
| 0
| null | 2020-02-04T12:25:56
| 2020-01-13T21:05:10
|
Java
|
UTF-8
|
Python
| false
| false
| 903
|
py
|
class Benchmark:
"""Wrapper class for allowed benchmarks."""
PARSEC = ["blackscholes", "bodytrack", "canneal", "dedup", "facesim",
"ferret", "fluidanimate", "raytrace", "streamcluster",
"swaptions", "vips", "x264"]
SPLASH2X = ["barnes", "cholesky", "fft", "fmm", "lu_cb", "lu_ncb",
"ocean_cp", "ocean_ncp", "radiosity", "radix", "raytrace",
"volrend", "water_nsquared", "water_spatial"]
HTTPD = ["httpd", "mysqld"]
    @staticmethod
    def isParsecBenchmark(bench):
        return bench in Benchmark.PARSEC
    @staticmethod
    def isSplash2xBenchmark(bench):
        return bench in Benchmark.SPLASH2X
    @staticmethod
    def isHTTPDBenchmark(bench):
        return bench in Benchmark.HTTPD
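# Usage sketch (membership values taken from the lists above):
#     Benchmark.isParsecBenchmark('vips')     # True
#     Benchmark.isSplash2xBenchmark('radix')  # True
#     Benchmark.isHTTPDBenchmark('nginx')     # False -- only httpd/mysqld qualify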
|
[
"ruizhang1217@yahoo.com"
] |
ruizhang1217@yahoo.com
|
c73005c81aaec8e7c0613dea2e18f7b12afbb9dd
|
f45cc0049cd6c3a2b25de0e9bbc80c25c113a356
|
/LeetCode/双指针(two points)/16. 3Sum Closest.py
|
999c518d3f03cb923594bff7a42b551b460d21fb
|
[] |
no_license
|
yiming1012/MyLeetCode
|
4a387d024969bfd1cdccd4f581051a6e4104891a
|
e43ee86c5a8cdb808da09b4b6138e10275abadb5
|
refs/heads/master
| 2023-06-17T06:43:13.854862
| 2021-07-15T08:54:07
| 2021-07-15T08:54:07
| 261,663,876
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,263
|
py
|
'''
Given an array nums of n integers and an integer target, find three integers in nums such that the sum is closest to target. Return the sum of the three integers. You may assume that each input would have exactly one solution.
Example:
Given array nums = [-1, 2, 1, -4], and target = 1.
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
'''
from typing import List
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
"""
        Approach: two pointers
        1. The main point is the pruning below
        2. The array must be sorted first
        3. if i > 0 and nums[i] == nums[i-1]: continue  (skip duplicate anchors)
        4. If the three smallest candidates for index i already exceed target,
           the closest achievable sum is known and can be returned early
        5. If the three largest candidates are still below target, continue
"""
nums.sort()
n = len(nums)
res = nums[0] + nums[1] + nums[2]
for i in range(n - 2):
l, r = i + 1, n - 1
if i > 0 and nums[i] == nums[i - 1]:
continue
threeSum = nums[i] + nums[i + 1] + nums[i + 2]
if threeSum >= target:
if abs(threeSum - target) < abs(res - target):
res = threeSum
return res
if nums[i] + nums[-1] + nums[-2] < target:
res = nums[i] + nums[-1] + nums[-2]
continue
while l < r:
threeSum = nums[i] + nums[l] + nums[r]
if threeSum < target:
if abs(threeSum - target) < abs(res - target):
res = threeSum
l += 1
                    # skip consecutive equal values
while l < r and nums[l] == nums[l - 1]:
l += 1
elif threeSum > target:
if abs(threeSum - target) < abs(res - target):
res = threeSum
r -= 1
                    # skip consecutive equal values
while l < r and nums[r] == nums[r + 1]:
r -= 1
else:
return target
return res
if __name__ == '__main__':
nums = [-1, 2, 1, -4]
target = 1
print(Solution().threeSumClosest(nums, target))
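    # Further illustrative cases (hypothetical, not from the original tests):
    # Solution().threeSumClosest([0, 0, 0], 1)      -> 0 (only one possible sum)
    # Solution().threeSumClosest([0, 1, 1, 1], -10) -> 2 (smallest reachable sum)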
|
[
"1129079384@qq.com"
] |
1129079384@qq.com
|
f1349d5120e8905215322a1cd477100e62f5b62b
|
1e9838ef39e44060ff7f04d6d830812ce5fed99f
|
/Python/Kaggle_ETL_Submisson.py
|
df444ea1dbf4ea48d6a89785c5271704dcc05cae
|
[] |
no_license
|
Greeshma-Venkatesh/DB225-pantrychef-project
|
61d6892781e2e2c0b39d4c603b9595a1d6b7fbe9
|
27099a1b72e974f5e43a6c4ead90b4c9008cdef5
|
refs/heads/master
| 2023-08-20T08:55:57.793807
| 2021-05-13T01:39:46
| 2021-05-13T01:39:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,192
|
py
|
# Property of SJSU Data225 Project Group3
# Version Date Author Desc
# 1 04/27/2021 Rishi Srivastava Initial Version
# 2 04/30/2021 Payal Added function to load Stats data
# 3 05/05/2021 Greeshma,Jyoti Peer Review
# The program below does a full pull of recipe data from four different sources and loads it into the Oracle Recipe Data Mart
import csv
import cx_Oracle
import io
import time
import json
recipeData = []
ingredientData = []
recipeStepsData = []
#break_ct = 100000
def oracleConnection():
try:
conn = cx_Oracle.connect('****/****')
print("Connection established")
return conn
except Exception as e:
print("Exception occurrred")
def loadData(data,s):
try:
sql = s
conn = oracleConnection()
c = conn.cursor()
ct = 0
for i in data:
ct+=1
#print(i)
try:
c.execute(sql,i)
except Exception as e:
print("Exception occurred while executing sql:",str(e))
print(i)
if ct % 1000 == 0:
print(str(ct),' : rows processed')
conn.commit()
#print(i)
print("Table Loaded successfully")
except Exception as e:
print("Exception occurred ")
print(str(e))
conn.rollback()
finally:
conn.commit()
c.close()
conn.close()
print("Connection and Cursor Closed")
def processKaggleData():
recipeHeader = ['title_id','title','date','categories','calories','desc','protein','rating','sodium']
global recipeData
global ingredientData
global recipeStepsData
global break_ct
fileLoc = "C:/Users/***/***/****/Kaggle/"
try:
fName = "full_format_recipes.json"
# Opening JSON file
f = open(fileLoc+fName)
# returns JSON object as a dictionary
data = json.load(f)
ct = 0
key = set()
title_id = 1
titleId = ''
for row in data:
ct+=1
#print(row)
temprecipeData = []
for col in recipeHeader:
if col == 'title_id':
titleId = "KAGGLE-"+str(title_id)
temprecipeData.append(titleId)
elif col == 'categories':
temprecipeData.append(None)
else:
temprecipeData.append(row[col])
recipeData.append(temprecipeData)
for key,val in row.items():
if key == 'directions':
for count, value in enumerate(val,1):
tempRecipeSteps = []
#print(count, value)
tempRecipeSteps.append(titleId)
tempRecipeSteps.append(count)
tempRecipeSteps.append(value)
recipeStepsData.append(tempRecipeSteps)
for key,val in row.items():
if key == 'ingredients':
for count, value in enumerate(val,1):
tempIngredients = []
#print(count, value)
tempIngredients.append(titleId)
tempIngredients.append(count)
tempIngredients.append(value.replace("ADVERTISEMENT",""))
ingredientData.append(tempIngredients)
title_id+=1
#if ct == break_ct:
# break
f.close()
except Exception as e:
print("Exception Occurred:",str(e))
def processAllrecipesData():
global recipeData
global ingredientData
global recipeStepsData
global break_ct
fileLoc = "C:/Users/***/***/****/EightPortions/"
try:
fName = "recipes_raw_nosource_ar.json"
# Opening JSON file
f = open(fileLoc+fName)
# returns JSON object as a dictionary
data = json.load(f)
ct = 0
for key,val in data.items():
#print(key,val)
temprecipeData = []
titleId = key
temprecipeData.append(titleId)
for k,v in val.items():
if k == 'title':
temprecipeData.append(v)
elif k == 'ingredients':
for count, value in enumerate(v,1):
tempIngredients = []
#print(count, value)
tempIngredients.append(titleId)
tempIngredients.append(count)
tempIngredients.append(value)
ingredientData.append(tempIngredients)
elif k == 'instructions':
lst = v.split(".")
for count, value in enumerate(lst,1):
tempRecipeSteps = []
#print(count, value)
tempRecipeSteps.append(titleId)
tempRecipeSteps.append(count)
tempRecipeSteps.append(value)
recipeStepsData.append(tempRecipeSteps)
recipeData.append(temprecipeData)
ct+=1
#if ct == break_ct:
# break
f.close()
except Exception as e:
print("Exception Occurred:",str(e))
def processEpicuriousData():
global recipeData
global ingredientData
global recipeStepsData
global break_ct
fileLoc = "C:/Users/***/***/****/EightPortions/"
try:
fName = "recipes_raw_nosource_epi.json"
# Opening JSON file
f = open(fileLoc+fName)
# returns JSON object as a dictionary
data = json.load(f)
ct = 0
for key,val in data.items():
#print(key,val)
temprecipeData = []
titleId = key
temprecipeData.append(titleId)
for k,v in val.items():
if k == 'title':
temprecipeData.append(v)
elif k == 'ingredients':
for count, value in enumerate(v,1):
tempIngredients = []
#print(count, value)
tempIngredients.append(titleId)
tempIngredients.append(count)
tempIngredients.append(value)
ingredientData.append(tempIngredients)
elif k == 'instructions':
lst = v.split(".")
for count, value in enumerate(lst,1):
tempRecipeSteps = []
#print(count, value)
tempRecipeSteps.append(titleId)
tempRecipeSteps.append(count)
tempRecipeSteps.append(value)
recipeStepsData.append(tempRecipeSteps)
recipeData.append(temprecipeData)
ct+=1
#if ct == break_ct:
# break
f.close()
except Exception as e:
print("Exception Occurred:",str(e))
def processFoodnetworkData():
global recipeData
global ingredientData
global recipeStepsData
global break_ct
fileLoc = "C:/Users/***/***/****/EightPortions/"
try:
fName = "recipes_raw_nosource_fn.json"
# Opening JSON file
f = open(fileLoc+fName)
# returns JSON object as a dictionary
data = json.load(f)
ct = 0
for key,val in data.items():
#print(key,val)
temprecipeData = []
titleId = key
temprecipeData.append(titleId)
for k,v in val.items():
if k == 'title':
temprecipeData.append(v)
elif k == 'ingredients':
for count, value in enumerate(v,1):
tempIngredients = []
#print(count, value)
tempIngredients.append(titleId)
tempIngredients.append(count)
tempIngredients.append(value)
ingredientData.append(tempIngredients)
elif k == 'instructions':
lst = v.split(".")
for count, value in enumerate(lst,1):
tempRecipeSteps = []
#print(count, value)
tempRecipeSteps.append(titleId)
tempRecipeSteps.append(count)
tempRecipeSteps.append(value)
recipeStepsData.append(tempRecipeSteps)
recipeData.append(temprecipeData)
ct+=1
#if ct == break_ct:
# break
f.close()
except Exception as e:
print("Exception Occurred:",str(e))
def processRecipeStats():
import csv
import io
import time
fileLoc = "C:/Users/***/***/****/Kaggle/"
fName = "epi_r_formatted.csv"
ct = 0
readerList = []
try:
csv_file = open(fileLoc+fName, "r",encoding="utf8")
reader = csv.reader(csv_file, delimiter=',',lineterminator="\n")
next(reader)
sql = "insert into pantrydb.recipe_stats VALUES (:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,:21,:22,:23,:24,:25,:26,:27,:28,:29,:30,:31,:32,:33,:34,:35,:36,:37,:38,:39,:40,:41,:42,:43,:44,:45,:46,:47,:48,:49,:50,:51,:52,:53,:54,:55,:56,:57,:58,:59,:60,:61,:62,:63,:64,:65,:66,:67,:68,:69,:70,:71,:72,:73,:74,:75,:76,:77,:78,:79,:80,:81,:82,:83,:84,:85,:86,:87,:88,:89,:90,:91,:92,:93,:94,:95,:96,:97,:98,:99,:100,:101,:102,:103,:104,:105,:106,:107,:108,:109,:110,:111,:112,:113,:114,:115,:116,:117,:118,:119,:120,:121,:122,:123,:124,:125,:126,:127,:128,:129,:130,:131,:132,:133,:134,:135,:136,:137,:138,:139,:140,:141,:142,:143,:144,:145,:146,:147,:148,:149,:150,:151,:152,:153,:154,:155,:156,:157,:158,:159,:160,:161,:162,:163,:164,:165,:166,:167,:168,:169,:170,:171,:172,:173,:174,:175,:176,:177,:178,:179,:180,:181,:182,:183,:184,:185,:186,:187,:188,:189,:190,:191,:192,:193,:194,:195,:196,:197,:198,:199,:200,:201,:202,:203,:204,:205,:206,:207,:208,:209,:210,:211,:212,:213,:214,:215,:216,:217,:218,:219,:220,:221,:222,:223,:224,:225,:226,:227,:228,:229,:230,:231,:232,:233,:234,:235,:236,:237,:238,:239,:240,:241,:242,:243,:244,:245,:246,:247,:248,:249,:250,:251,:252,:253,:254,:255,:256,:257,:258,:259,:260,:261,:262,:263,:264,:265,:266,:267,:268,:269,:270,:271,:272,:273,:274,:275,:276,:277,:278,:279,:280,:281,:282,:283,:284,:285,:286,:287,:288,:289,:290,:291,:292,:293,:294,:295,:296,:297,:298,:299,:300,:301,:302,:303,:304,:305,:306,:307,:308,:309,:310,:311,:312,:313,:314,:315,:316,:317,:318,:319,:320,:321,:322,:323,:324,:325,:326,:327,:328,:329,:330,:331,:332,:333,:334,:335,:336,:337,:338,:339,:340,:341,:342,:343,:344,:345,:346,:347,:348,:349,:350,:351,:352,:353,:354,:355,:356,:357,:358,:359,:360,:361,:362,:363,:364,:365,:366,:367,:368,:369,:370,:371,:372,:373,:374,:375,:376,:377,:378,:379,:380,:381,:382,:383,:384,:385,:386,:387,:388,:389,:390,:391,:392,:393,:394,:395,:396,:397,:398,:399,:400,:401,:402,:403,:404,:405,:406,:407,:408,:409,:410,:411,:412,:413,:414,:415,:416,:417,:418,:419,:420,:421,:422,:423,:424,:425,:426,:427,:428,:429,:430,:431,:432,:433,:434,:435,:436,:437,:438,:439,:440,:441,:442,:443,:444,:445,:446,:447,:448,:449,:450,:451,:452,:453,:454,:455,:456,:457,:458,:459,:460,:461,:462,:463,:464,:465,:466,:467,:468,:469,:470,:471,:472,:473,:474,:475,:476,:477,:478,:479,:480,:481,:482,:483,:484,:485,:486,:487,:488,:489,:490,:491,:492,:493,:494,:495,:496,:497,:498,:499,:500,:501,:502,:503,:504,:505,:506,:507,:508,:509,:510,:511,:512,:513,:514,:515,:516,:517,:518,:519,:520,:521,:522,:523,:524,:525,:526,:527,:528,:529,:530,:531,:532,:533,:534,:535,:536,:537,:538,:539,:540,:541,:542,:543,:544,:545,:546,:547,:548,:549,:550,:551,:552,:553,:554,:555,:556,:557,:558,:559,:560,:561,:562,:563,:564,:565,:566,:567,:568,:569,:570,:571,:572,:573,:574,:575,:576,:577,:578,:579,:580,:581,:582,:583,:584,:585,:586,:587,:588,:589,:590,:591,:592,:593,:594,:595,:596,:597,:598,:599,:600,:601,:602,:603,:604,:605,:606,:607,:608,:609,:610,:611,:612,:613,:614,:615,:616,:617,:618,:619,:620,:621,:622,:623,:624,:625,:626,:627,:628,:629,:630,:631,:632,:633,:634,:635,:636,:637,:638,:639,:640,:641,:642,:643,:644,:645,:646,:647,:648,:649,:650,:651,:652,:653,:654,:655,:656,:657,:658,:659,:660,:661,:662,:663,:664,:665,:666,:667,:668,:669,:670,:671,:672,:673,:674,:675,:676,:677,:678,:679,:680)"
conn = oracleConnection()
c = conn.cursor()
for i in reader:
ct+=1
#print(i)
try:
c.execute(sql,i)
except Exception as e1:
print(i)
print("Exception happend while executing sql", str(e1))
continue
if ct % 10000 == 0:
print(str(ct),' : rows processed')
print("Table Loaded successfully")
except Exception as e:
print("Exception occurred ")
print(str(e))
#conn.rollback()
finally:
conn.commit()
conn.close()
print(str(ct),' : rows processed')
print("Connection and Cursor Closed")
def main():
global recipeData
global ingredientData
global recipeStepsData
try:
processKaggleData() #function to process Kaggle data
recipeSql = "insert into PANTRYDB.RECIPE(TITLE_ID,TITLE,CREATED_DATE,CATEGORIES,CALORIES,DESCRIPTION,PROTEIN,RATING,SODIUM) VALUES (:1,:2,:3,:4,:5,:6,:7,:8,:9)"
loadData(recipeData,recipeSql)
ingredientSql = "insert into PANTRYDB.RECIPE_INGREDIENTS(TITLE_ID,INGREDIENT_ID,INGREDIENT_NAME) VALUES (:1,:2,:3)"
loadData(ingredientData,ingredientSql)
stepSql = "insert into PANTRYDB.RECIPE_DIRECTIONS(TITLE_ID,SEQ,NAME) VALUES (:1,:2,:3)"
loadData(recipeStepsData,stepSql)
recipeData = []
ingredientData = []
recipeStepsData = []
processAllrecipesData() #function call to process AllRecipe data
processEpicuriousData() #function call to process EPICurious data
processFoodnetworkData() #function call to process Food Network data
recipeSql = "insert into PANTRYDB.RECIPE(TITLE_ID,TITLE) VALUES (:1,:2)"
loadData(recipeData,recipeSql)
ingredientSql = "insert into PANTRYDB.RECIPE_INGREDIENTS(TITLE_ID,INGREDIENT_ID,INGREDIENT_NAME) VALUES (:1,:2,:3)"
loadData(ingredientData,ingredientSql)
stepSql = "insert into PANTRYDB.RECIPE_DIRECTIONS(TITLE_ID,SEQ,NAME) VALUES (:1,:2,:3)"
loadData(recipeStepsData,stepSql)
processRecipeStats()
except Exception as e:
print("Exception occurred",str(e))
if __name__ == '__main__':
    main()
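# Possible batching sketch (an alternative, not the original approach): the
# cx_Oracle cursor also supports bulk inserts, which could replace the per-row
# execute() loop inside loadData():
#     c.executemany(sql, data)
#     conn.commit()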
|
[
"rishi.wip@gmail.com"
] |
rishi.wip@gmail.com
|
4ea047de73216b163225392635a3e6088aafa37f
|
b449d885991fc4d03eed8507d3fd57bd1e5ee144
|
/question_no_20.py
|
ced99ce48c1c3a3781cafc2d352775014a8d1536
|
[] |
no_license
|
vksudin/python-lab
|
ea8f0ab69b1939396a7a559da64cc8e69391143d
|
653420600ea85680d651df54b7ddee24ad09a19c
|
refs/heads/main
| 2023-03-15T19:25:10.396968
| 2021-03-06T02:23:58
| 2021-03-06T02:23:58
| 344,852,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 20:48:28 2021
@author: sudin jana
"""
def anagram(a,b):
print(a,b)
if len(a)!=len(b):
print("Not anagrams")
return
a=sorted(a)
b=sorted(b)
for i in range(len(a)):
if a[i]!=b[i]:
print("Not anagrams")
return
print("Anagrams")
anagram('LISTEN','SILENT')
anagram('TRIANGLE','INTEGRAL')
anagram('aba','ccc')
def convert(l):
print(l)
t=tuple(l)
print(t)
convert([1,23,3])
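# Equivalent one-expression anagram check (same idea as above, for reference):
#     sorted(a) == sorted(b)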
|
[
"noreply@github.com"
] |
vksudin.noreply@github.com
|
5f230592b6eb6fe910fff31b71e67880f9692b82
|
de72607be743b6c87005bf4e3da54bc80abd6dd5
|
/MP1/MP1_Unicast.py
|
3cc404f3bb53e835133f0e961fdda341ec7a0412
|
[] |
no_license
|
xiashang0624/Distribution_system_MP2
|
16709699b7146826d9e215cd4dd49555fb177ee2
|
0caf982a8f2444440b25af1232bafb4976491b31
|
refs/heads/master
| 2020-03-07T11:42:16.086436
| 2018-05-02T21:35:40
| 2018-05-02T21:35:40
| 127,461,980
| 0
| 1
| null | 2018-04-25T05:37:51
| 2018-03-30T18:47:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,112
|
py
|
import socket, time, threading
from random import randint
import pdb
# This program performs unicast messaging with an option to add artificial delay.
# define a function to listen
def Listen(server_socket):
while True:
try:
            receive_msg, addr = server_socket.recvfrom(1024)
            receive_time = time.asctime().split()[3]
            print ('Received '+ str('"') + str(receive_msg.decode('utf-8')) + str('"') +
                   ' from process {}, system time is '.format(addr[-1]%10) + receive_time)
except:
time.sleep(0.05)
pass
# define a function to unicast
def Unicast(client_socket):
while True:
message = input()
msg = message.split()
if not msg or msg[0] != 'send':
print('Error input, unicast message should use the following format: send destination msg')
pass
else:
target = int(msg[1])
message = message[7:] # remove send and target number from input string
send_time = time.asctime().split()[3]
print ('Send '+ str('"') + message + str('"') +
' to process {}, system time is '.format(target)+ send_time)
Delay(client_socket, target, message)
# implement the delay mechanism
# if we want to remove the delay mechanism, set the delay_time to 0
def Delay(client_socket, target, message):
delay_time = randint(min_delay, max_delay)/1000.0 #set it to 0 to remove the delay mechanism
time.sleep(delay_time)
client_socket.sendto(message.encode('utf-8'), addr_list[target])
# Unordered multicast
def Multicast_unorder(client_socket):
while True:
message = input()
for i in range(4):
client_socket.sendto(message.encode('utf-8'),addr_list[i])
# Initialize the process information: process number, host address, and IP
# address
# read the config file
with open('config.txt') as f:
content = f.readlines()
# save the min_delay and max_delay in two variables
min_delay, max_delay = content[0].strip().split()
min_delay, max_delay = int(min_delay), int(max_delay)
# save the other information in the port_info list
port_info = []
for i in content[1:-2]:
port_info.append(i.strip().split())
# get the process IP and port info based on the selected number
def process_info(number):
address = port_info[number][1]
port = int(port_info[number][2])
return (address, port)
addr_list = []
for i in range(4):
addr_list.append(process_info(i))
process_number = 9999
while process_number not in {0, 1, 2, 3}:
    # input() returns a string, so convert before the membership test
    process_number = int(input('Select the process number from 0-3: '))
print('The process number selected is: {}'.format(process_number))
# bind socket to the ip address based on the config file
s= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(addr_list[process_number])
# a thread is used to receive incoming messages
receive_thread = threading.Thread(target=Listen, args=(s,))
receive_thread.start()
# Unicast
Unicast(s)
# the main program for multicast message
#Multicast_unorder(s)
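# config.txt layout sketch (inferred from the parsing above -- note that the
# last two lines of the file are skipped by content[1:-2]):
#     <min_delay_ms> <max_delay_ms>
#     <process_id> <host> <port>     (one line per process, four in total)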
|
[
"xshang3@gelib-4c-41.ews.illinois.edu"
] |
xshang3@gelib-4c-41.ews.illinois.edu
|
244f722f90ec59016d843f887036ec423dc5b19e
|
80874bb26208b6c82ef558a904f97d45c7e23742
|
/airflow/airflow/dags/load_dimension_table_subdag.py
|
4fd72c1c957cfa98c4e6a991e6c25c5d1f83cbef
|
[] |
no_license
|
Mousumi-Singha/data-pipeline-airflow
|
2fe8d25c5705e6a23e7887da2a604b7d95b01fd7
|
43194b47f539ca8ca948f9b1bfcdeff18f630506
|
refs/heads/master
| 2023-04-04T11:59:46.633082
| 2021-03-31T05:01:33
| 2021-03-31T05:01:33
| 353,217,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
import logging
from airflow import DAG
from airflow.operators import LoadDimensionOperator
def get_load_dimension_table_subdag(
parent_dag_name,
task_id,
default_args,
postgres_conn_id,
sql_queries,
tables,
truncate_flags,
*args,
**kwargs):
dag = DAG(
dag_id=f'{parent_dag_name}.{task_id}',
default_args=default_args,
**kwargs,
)
if (len(tables) != len(sql_queries)) or (len(sql_queries) != len(truncate_flags)):
logging.error('Tables, SQL queries and truncate settings not of same length')
raise ValueError('Tables, SQL queries and truncate settings not of same length')
tasks = []
for table, query, truncate in zip(tables, sql_queries, truncate_flags):
task = LoadDimensionOperator(
task_id=f'Load_{table}_dim_table',
dag=dag,
postgres_conn_id=postgres_conn_id,
sql=query,
table=table,
truncate=truncate,
)
tasks.append(task)
return dag
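# Wiring sketch (hypothetical parent-DAG code; names like 'redshift' and the
# query/table lists are placeholders). In Airflow 1.x a subdag factory like
# this is typically attached via SubDagOperator:
#     from airflow.operators.subdag_operator import SubDagOperator
#     load_dims = SubDagOperator(
#         task_id='Load_dimension_tables',
#         subdag=get_load_dimension_table_subdag(
#             parent_dag_name=dag.dag_id,
#             task_id='Load_dimension_tables',
#             default_args=default_args,
#             postgres_conn_id='redshift',
#             sql_queries=queries,
#             tables=tables,
#             truncate_flags=flags,
#         ),
#         dag=dag,
#     )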
|
[
"mousumisingha90@gmail.com"
] |
mousumisingha90@gmail.com
|
2d7ea85777003a35886a2ed9a54c7eacb02feeac
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/4/kfa.py
|
fa58bfbad8c36ae3ec8f5d8bfeacb3f8cb899cab
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'kFA':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
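# Input file sketch (inferred from main() above: each line starts with the
# literal keyword 'kFA', followed by a message wrapped in standalone quotes):
#     kFA " hello world "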
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
7576f8247220729c624ef1a5b86bbe6d99d3a674
|
64d7e4aa68678467798ff776156e9e571d9c2afc
|
/mazeGame.py
|
441b9bc8cd250b6a5a051f2bab7e04303456ea6c
|
[] |
no_license
|
yozeff/mazeGame
|
29fd113ae3e30d0d343b355ca6c4c1490d6a8ca7
|
30a34d99490f3f41e74b28b9924263a43bfa233e
|
refs/heads/master
| 2020-05-09T15:06:20.251008
| 2019-04-13T19:54:38
| 2019-04-13T19:54:38
| 181,221,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,282
|
py
|
from mazeArray import make_arr
from mazeArray import make_maze
import pygame
import random as r
#size of each block
SIZE = 10
WALL_COL = (255, 0, 0)
PLAYER_COL = (255, 255, 0)
END_COL = (255, 0, 255)
def game_loop(arr, pi, pj, ei, ej):
global blocks
blocks = []
#screen size
X, Y = SIZE * len(arr[0]), len(arr) * SIZE
pygame.init()
screen = pygame.display.set_mode((X, Y))
pygame.display.set_caption('maze game')
clock = pygame.time.Clock()
#game loop
running = True
while running:
clock.tick(10)
        #handle os quitting
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
running = False
screen.fill((0,0,0))
#handle io
key = pygame.key.get_pressed()
#w for up
if key[pygame.K_w] and arr[pi - 1][pj] != '=':
pi -= 1
#a for left
elif key[pygame.K_a] and arr[pi][pj - 1] != '=':
pj -= 1
#s for down
elif key[pygame.K_s] and arr[pi + 1][pj] != '=':
pi += 1
#d for right
elif key[pygame.K_d] and arr[pi][pj + 1] != '=':
pj += 1
#draw walls
x, y = 0, 0
for row in arr:
for item in row:
if item == '=':
pygame.draw.rect(screen, WALL_COL,
pygame.Rect(x, y, SIZE, SIZE))
x += SIZE
y += SIZE
x = 0
#draw player
pygame.draw.rect(screen, PLAYER_COL,
pygame.Rect(pj * SIZE, pi * SIZE, SIZE, SIZE))
#draw end
pygame.draw.rect(screen, END_COL,
pygame.Rect(ej * SIZE, ei * SIZE, SIZE, SIZE))
#check if the player has won
if pi == ei and pj == ej:
print("you've won!")
running = False
pygame.display.flip()
#if we leave the game loop
#close the window
pygame.quit()
def main():
cont = ''
while cont != 'q':
#get difficulty from user
flag = False
while not flag:
diff = input('easy (e), medium (m) or hard (h): ')
if diff == 'e':
I, J = 21, 21
flag = True
elif diff == 'm':
I, J = 31, 31
flag = True
elif diff == 'h':
I, J = 41, 41
flag = True
#create the maze array
arr = make_arr(I, J)
arr = make_maze(arr, (1, 1))
#place the player
wi, wj = (I - 1) // 2 - 1, (J - 1) // 2 - 1
pi, pj = r.randint(0, wi), r.randint(0, wj)
pi, pj = 2 * pi + 1, 2 * pj + 1
#place end
ei, ej = r.randint(0, wi), r.randint(0, wj)
#make sure we are not placing the end on the player
while ei == pi and ej == pj:
ei, ej = r.randint(0, wi), r.randint(0, wj)
ei, ej = 2 * ei + 1, 2 * ej + 1
#run game with maze array
game_loop(arr, pi, pj, ei, ej)
cont = input("'q' to quit: ")
print('exiting...')
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
yozeff.noreply@github.com
|
3e6274f68a32a64cdaad8f145058730bafa63415
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Sklearn_scipy_numpy/source/numpy/distutils/system_info.py
|
d7eb49ecd6b2ad37af5555202623dde6a903977b
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 85,533
|
py
|
#!/bin/env python
"""
This file defines a set of system_info classes for getting
information about various resources (libraries, library directories,
include directories, etc.) in the system. Currently, the following
classes are available:
atlas_info
atlas_threads_info
atlas_blas_info
atlas_blas_threads_info
lapack_atlas_info
lapack_atlas_threads_info
atlas_3_10_info
atlas_3_10_threads_info
atlas_3_10_blas_info,
atlas_3_10_blas_threads_info,
lapack_atlas_3_10_info
lapack_atlas_3_10_threads_info
blas_info
lapack_info
openblas_info
blas_opt_info # usage recommended
lapack_opt_info # usage recommended
fftw_info,dfftw_info,sfftw_info
fftw_threads_info,dfftw_threads_info,sfftw_threads_info
djbfft_info
x11_info
lapack_src_info
blas_src_info
numpy_info
numarray_info
numpy_info
boost_python_info
agg2_info
wx_info
gdk_pixbuf_xlib_2_info
gdk_pixbuf_2_info
gdk_x11_2_info
gtkp_x11_2_info
gtkp_2_info
xft_info
freetype2_info
umfpack_info
Usage:
info_dict = get_info(<name>)
where <name> is a string 'atlas','x11','fftw','lapack','blas',
'lapack_src', 'blas_src', etc. For a complete list of allowed names,
see the definition of get_info() function below.
Returned info_dict is a dictionary which is compatible with
distutils.setup keyword arguments. If info_dict == {}, then the
asked resource is not available (system_info could not find it).
Several *_info classes specify an environment variable to specify
the locations of software. When setting the corresponding environment
variable to 'None' then the software will be ignored, even when it
is available in system.
Global parameters:
system_info.search_static_first - search static libraries (.a)
in precedence to shared ones (.so, .sl) if enabled.
system_info.verbosity - output the results to stdout if enabled.
The file 'site.cfg' is looked for in
1) Directory of main setup.py file being run.
2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
3) System wide directory (location of this file...)
The first one found is used to get system configuration options The
format is that used by ConfigParser (i.e., Windows .INI style). The
section ALL has options that are the default for each section. The
available sections are fftw, atlas, and x11. Appropriate defaults are
used if nothing is specified.
The order of finding the locations of resources is the following:
1. environment variable
2. section in site.cfg
3. ALL section in site.cfg
Only the first complete match is returned.
Example:
----------
[ALL]
library_dirs = /usr/lib:/usr/local/lib:/opt/lib
include_dirs = /usr/include:/usr/local/include:/opt/include
src_dirs = /usr/local/src:/opt/src
# search static libraries (.a) in preference to shared ones (.so)
search_static_first = 0
[fftw]
fftw_libs = rfftw, fftw
fftw_opt_libs = rfftw_threaded, fftw_threaded
# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs
[atlas]
library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
# for overriding the names of the atlas libraries
atlas_libs = lapack, f77blas, cblas, atlas
[x11]
library_dirs = /usr/X11R6/lib
include_dirs = /usr/X11R6/include
----------
Authors:
Pearu Peterson <pearu@cens.ioc.ee>, February 2002
David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import copy
import warnings
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
from ConfigParser import NoOptionError, ConfigParser
else:
from configparser import NoOptionError, ConfigParser
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import distutils.sysconfig
from distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import \
find_executable, exec_command, get_pythonexe
from numpy.distutils.misc_util import is_sequence, is_string, \
get_shared_lib_extension
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils.compat import get_exception
import distutils.ccompiler
import tempfile
import shutil
# Determine number of bits
import platform
_bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]
def libpaths(paths, bits):
"""Return a list of library paths valid on 32 or 64 bit systems.
Inputs:
paths : sequence
A sequence of strings (typically paths)
bits : int
An integer, the only valid values are 32 or 64. A ValueError exception
is raised otherwise.
Examples:
Consider a list of directories
>>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
For a 32-bit platform, this is already valid:
>>> np.distutils.system_info.libpaths(paths,32)
['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']
On 64 bits, we prepend the '64' postfix
>>> np.distutils.system_info.libpaths(paths,64)
['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',
'/usr/lib64', '/usr/lib']
"""
if bits not in (32, 64):
raise ValueError("Invalid bit size in libpaths: 32 or 64 only")
# Handle 32bit case
if bits == 32:
return paths
# Handle 64bit case
out = []
for p in paths:
out.extend([p + '64', p])
return out
if sys.platform == 'win32':
default_lib_dirs = ['C:\\',
os.path.join(distutils.sysconfig.EXEC_PREFIX,
'libs')]
default_runtime_dirs = []
default_include_dirs = []
default_src_dirs = ['.']
default_x11_lib_dirs = []
default_x11_include_dirs = []
else:
default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
'/opt/local/lib', '/sw/lib'], platform_bits)
default_runtime_dirs = []
default_include_dirs = ['/usr/local/include',
'/opt/include', '/usr/include',
# path of umfpack under macports
'/opt/local/include/ufsparse',
'/opt/local/include', '/sw/include',
'/usr/include/suitesparse']
default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']
default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
'/usr/lib'], platform_bits)
default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include',
'/usr/include']
if os.path.exists('/usr/lib/X11'):
globbed_x11_dir = glob('/usr/lib/*/libX11.so')
if globbed_x11_dir:
x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
default_x11_include_dirs.extend(['/usr/lib/X11/include',
'/usr/include/X11'])
import subprocess as sp
tmp = None
try:
# Explicitly open/close file to avoid ResourceWarning when
# tests are run in debug mode Python 3.
tmp = open(os.devnull, 'w')
p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE,
stderr=tmp)
except (OSError, DistutilsError):
# OSError if gcc is not installed, or SandboxViolation (DistutilsError
# subclass) if an old setuptools bug is triggered (see gh-3160).
pass
else:
triplet = str(p.communicate()[0].decode().strip())
if p.returncode == 0:
# gcc supports the "-print-multiarch" option
default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
finally:
if tmp is not None:
tmp.close()
if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
default_include_dirs.append(os.path.join(sys.prefix, 'include'))
default_src_dirs.append(os.path.join(sys.prefix, 'src'))
default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]
so_ext = get_shared_lib_extension()
def get_standard_file(fname):
"""Returns a list of files named 'fname' from
1) System-wide directory (directory-location of this module)
2) Users HOME directory (os.environ['HOME'])
3) Local directory
"""
# System-wide file
filenames = []
try:
f = __file__
except NameError:
f = sys.argv[0]
else:
sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],
fname)
if os.path.isfile(sysfile):
filenames.append(sysfile)
# Home directory
# And look for the user config file
try:
f = os.path.expanduser('~')
except KeyError:
pass
else:
user_file = os.path.join(f, fname)
if os.path.isfile(user_file):
filenames.append(user_file)
# Local file
if os.path.isfile(fname):
filenames.append(os.path.abspath(fname))
return filenames
def get_info(name, notfound_action=0):
"""
notfound_action:
0 - do nothing
1 - display warning message
2 - raise error
"""
cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead
'atlas_threads': atlas_threads_info, # ditto
'atlas_blas': atlas_blas_info,
'atlas_blas_threads': atlas_blas_threads_info,
'lapack_atlas': lapack_atlas_info, # use lapack_opt instead
'lapack_atlas_threads': lapack_atlas_threads_info, # ditto
'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead
'atlas_3_10_threads': atlas_3_10_threads_info, # ditto
'atlas_3_10_blas': atlas_3_10_blas_info,
'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead
'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto
'mkl': mkl_info,
# openblas which may or may not have embedded lapack
'openblas': openblas_info, # use blas_opt instead
# openblas with embedded lapack
'openblas_lapack': openblas_lapack_info, # use blas_opt instead
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
'fftw2': fftw2_info,
'fftw3': fftw3_info,
'dfftw': dfftw_info,
'sfftw': sfftw_info,
'fftw_threads': fftw_threads_info,
'dfftw_threads': dfftw_threads_info,
'sfftw_threads': sfftw_threads_info,
'djbfft': djbfft_info,
'blas': blas_info, # use blas_opt instead
'lapack': lapack_info, # use lapack_opt instead
'lapack_src': lapack_src_info,
'blas_src': blas_src_info,
'numpy': numpy_info,
'f2py': f2py_info,
'Numeric': Numeric_info,
'numeric': Numeric_info,
'numarray': numarray_info,
'numerix': numerix_info,
'lapack_opt': lapack_opt_info,
'blas_opt': blas_opt_info,
'boost_python': boost_python_info,
'agg2': agg2_info,
'wx': wx_info,
'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
'gdk_pixbuf_2': gdk_pixbuf_2_info,
'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
'gdk': gdk_info,
'gdk_2': gdk_2_info,
'gdk-2.0': gdk_2_info,
'gdk_x11_2': gdk_x11_2_info,
'gdk-x11-2.0': gdk_x11_2_info,
'gtkp_x11_2': gtkp_x11_2_info,
'gtk+-x11-2.0': gtkp_x11_2_info,
'gtkp_2': gtkp_2_info,
'gtk+-2.0': gtkp_2_info,
'xft': xft_info,
'freetype2': freetype2_info,
'umfpack': umfpack_info,
'amd': amd_info,
}.get(name.lower(), system_info)
return cl().get_info(notfound_action)
class NotFoundError(DistutilsError):
"""Some third-party program or library is not found."""
class AtlasNotFoundError(NotFoundError):
"""
Atlas (http://math-atlas.sourceforge.net/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [atlas]) or by setting
the ATLAS environment variable."""
class LapackNotFoundError(NotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [lapack]) or by setting
the LAPACK environment variable."""
class LapackSrcNotFoundError(LapackNotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) sources not found.
Directories to search for the sources can be specified in the
numpy/distutils/site.cfg file (section [lapack_src]) or by setting
the LAPACK_SRC environment variable."""
class BlasNotFoundError(NotFoundError):
"""
Blas (http://www.netlib.org/blas/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [blas]) or by setting
the BLAS environment variable."""
class BlasSrcNotFoundError(BlasNotFoundError):
"""
Blas (http://www.netlib.org/blas/) sources not found.
Directories to search for the sources can be specified in the
numpy/distutils/site.cfg file (section [blas_src]) or by setting
the BLAS_SRC environment variable."""
class FFTWNotFoundError(NotFoundError):
"""
FFTW (http://www.fftw.org/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [fftw]) or by setting
the FFTW environment variable."""
class DJBFFTNotFoundError(NotFoundError):
"""
DJBFFT (http://cr.yp.to/djbfft.html) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [djbfft]) or by setting
the DJBFFT environment variable."""
class NumericNotFoundError(NotFoundError):
"""
Numeric (http://www.numpy.org/) module not found.
Get it from above location, install it, and retry setup.py."""
class X11NotFoundError(NotFoundError):
"""X11 libraries not found."""
class UmfpackNotFoundError(NotFoundError):
"""
UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/)
not found. Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [umfpack]) or by setting
the UMFPACK environment variable."""
class system_info(object):
""" get_info() is the only public method. Don't use others.
"""
section = 'ALL'
dir_env_var = None
search_static_first = 0 # XXX: disabled by default, may disappear in
# future unless it is proved to be useful.
verbosity = 1
saved_results = {}
notfounderror = NotFoundError
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
verbosity=1,
):
self.__class__.info = {}
self.local_prefixes = []
defaults = {}
defaults['library_dirs'] = os.pathsep.join(default_lib_dirs)
defaults['include_dirs'] = os.pathsep.join(default_include_dirs)
defaults['runtime_library_dirs'] = os.pathsep.join(default_runtime_dirs)
defaults['rpath'] = ''
defaults['src_dirs'] = os.pathsep.join(default_src_dirs)
defaults['search_static_first'] = str(self.search_static_first)
defaults['extra_compile_args'] = ''
defaults['extra_link_args'] = ''
self.cp = ConfigParser(defaults)
self.files = []
self.files.extend(get_standard_file('.numpy-site.cfg'))
self.files.extend(get_standard_file('site.cfg'))
self.parse_config_files()
if self.section is not None:
self.search_static_first = self.cp.getboolean(
self.section, 'search_static_first')
assert isinstance(self.search_static_first, int)
def parse_config_files(self):
self.cp.read(self.files)
if not self.cp.has_section(self.section):
if self.section is not None:
self.cp.add_section(self.section)
def calc_libraries_info(self):
libs = self.get_libraries()
dirs = self.get_lib_dirs()
# The extensions use runtime_library_dirs
r_dirs = self.get_runtime_lib_dirs()
# Intrinsic distutils use rpath, we simply append both entries
# as though they were one entry
r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))
info = {}
for lib in libs:
i = self.check_libs(dirs, [lib])
if i is not None:
dict_append(info, **i)
else:
log.info('Library %s was not found. Ignoring' % (lib))
i = self.check_libs(r_dirs, [lib])
if i is not None:
# Swap library keywords found to runtime_library_dirs
# the libraries are insisting on the user having defined
# them using the library_dirs, and not necessarily by
# runtime_library_dirs
del i['libraries']
i['runtime_library_dirs'] = i.pop('library_dirs')
dict_append(info, **i)
else:
log.info('Runtime library %s was not found. Ignoring' % (lib))
return info
def set_info(self, **info):
if info:
lib_info = self.calc_libraries_info()
dict_append(info, **lib_info)
# Update extra information
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
self.saved_results[self.__class__.__name__] = info
def has_info(self):
return self.__class__.__name__ in self.saved_results
def calc_extra_info(self):
""" Updates the information in the current information with
respect to these flags:
extra_compile_args
extra_link_args
"""
info = {}
for key in ['extra_compile_args', 'extra_link_args']:
# Get values
opt = self.cp.get(self.section, key)
if opt:
tmp = {key : [opt]}
dict_append(info, **tmp)
return info
def get_info(self, notfound_action=0):
""" Return a dictonary with items that are compatible
with numpy.distutils.setup keyword arguments.
"""
flag = 0
if not self.has_info():
flag = 1
log.info(self.__class__.__name__ + ':')
if hasattr(self, 'calc_info'):
self.calc_info()
if notfound_action:
if not self.has_info():
if notfound_action == 1:
warnings.warn(self.notfounderror.__doc__)
elif notfound_action == 2:
raise self.notfounderror(self.notfounderror.__doc__)
else:
raise ValueError(repr(notfound_action))
if not self.has_info():
log.info(' NOT AVAILABLE')
self.set_info()
else:
log.info(' FOUND:')
res = self.saved_results.get(self.__class__.__name__)
if self.verbosity > 0 and flag:
for k, v in res.items():
v = str(v)
if k in ['sources', 'libraries'] and len(v) > 270:
v = v[:120] + '...\n...\n...' + v[-120:]
log.info(' %s = %s', k, v)
log.info('')
return copy.deepcopy(res)
def get_paths(self, section, key):
dirs = self.cp.get(section, key).split(os.pathsep)
env_var = self.dir_env_var
if env_var:
if is_sequence(env_var):
e0 = env_var[-1]
for e in env_var:
if e in os.environ:
e0 = e
break
if not env_var[0] == e0:
log.info('Setting %s=%s' % (env_var[0], e0))
env_var = e0
if env_var and env_var in os.environ:
d = os.environ[env_var]
if d == 'None':
log.info('Disabled %s: %s',
self.__class__.__name__, '(%s is None)'
% (env_var,))
return []
if os.path.isfile(d):
dirs = [os.path.dirname(d)] + dirs
l = getattr(self, '_lib_names', [])
if len(l) == 1:
b = os.path.basename(d)
b = os.path.splitext(b)[0]
if b[:3] == 'lib':
log.info('Replacing _lib_names[0]==%r with %r' \
% (self._lib_names[0], b[3:]))
self._lib_names[0] = b[3:]
else:
ds = d.split(os.pathsep)
ds2 = []
for d in ds:
if os.path.isdir(d):
ds2.append(d)
for dd in ['include', 'lib']:
d1 = os.path.join(d, dd)
if os.path.isdir(d1):
ds2.append(d1)
dirs = ds2 + dirs
default_dirs = self.cp.get(self.section, key).split(os.pathsep)
dirs.extend(default_dirs)
ret = []
for d in dirs:
if not os.path.isdir(d):
warnings.warn('Specified path %s is invalid.' % d)
continue
if d not in ret:
ret.append(d)
log.debug('( %s = %s )', key, ':'.join(ret))
return ret
def get_lib_dirs(self, key='library_dirs'):
return self.get_paths(self.section, key)
def get_runtime_lib_dirs(self, key='runtime_library_dirs'):
return self.get_paths(self.section, key)
def get_include_dirs(self, key='include_dirs'):
return self.get_paths(self.section, key)
def get_src_dirs(self, key='src_dirs'):
return self.get_paths(self.section, key)
def get_libs(self, key, default):
try:
libs = self.cp.get(self.section, key)
except NoOptionError:
if not default:
return []
if is_string(default):
return [default]
return default
return [b for b in [a.strip() for a in libs.split(',')] if b]
def get_libraries(self, key='libraries'):
return self.get_libs(key, '')
def library_extensions(self):
static_exts = ['.a']
if sys.platform == 'win32':
static_exts.append('.lib') # .lib is used by MSVC
if self.search_static_first:
exts = static_exts + [so_ext]
else:
exts = [so_ext] + static_exts
if sys.platform == 'cygwin':
exts.append('.dll.a')
if sys.platform == 'darwin':
exts.append('.dylib')
# Debian and Ubuntu added a g3f suffix to shared library to deal with
# g77 -> gfortran ABI transition
# XXX: disabled, it hides more problem than it solves.
#if sys.platform[:5] == 'linux':
# exts.append('.so.3gf')
return exts
def check_libs(self, lib_dirs, libs, opt_libs=[]):
"""If static or shared libraries are available then return
their info dictionary.
Checks for all libraries as shared libraries first, then
static (or vice versa if self.search_static_first is True).
"""
exts = self.library_extensions()
info = None
for ext in exts:
info = self._check_libs(lib_dirs, libs, opt_libs, [ext])
if info is not None:
break
if not info:
log.info(' libraries %s not found in %s', ','.join(libs),
lib_dirs)
return info
def check_libs2(self, lib_dirs, libs, opt_libs=[]):
"""If static or shared libraries are available then return
their info dictionary.
Checks each library for shared or static.
"""
exts = self.library_extensions()
info = self._check_libs(lib_dirs, libs, opt_libs, exts)
if not info:
log.info(' libraries %s not found in %s', ','.join(libs),
lib_dirs)
return info
def _lib_list(self, lib_dir, libs, exts):
assert is_string(lib_dir)
liblist = []
# under windows first try without 'lib' prefix
if sys.platform == 'win32':
lib_prefixes = ['', 'lib']
else:
lib_prefixes = ['lib']
# for each library name, see if we can find a file for it.
for l in libs:
for ext in exts:
for prefix in lib_prefixes:
p = self.combine_paths(lib_dir, prefix + l + ext)
if p:
break
if p:
assert len(p) == 1
# ??? splitext on p[0] would do this for cygwin
# doesn't seem correct
if ext == '.dll.a':
l += '.dll'
liblist.append(l)
break
return liblist
def _check_libs(self, lib_dirs, libs, opt_libs, exts):
"""Find mandatory and optional libs in expected paths.
Missing optional libraries are silently forgotten.
"""
# First, try to find the mandatory libraries
if is_sequence(lib_dirs):
found_libs, found_dirs = [], []
for dir_ in lib_dirs:
found_libs1 = self._lib_list(dir_, libs, exts)
# It's possible that we'll find the same library in multiple
# directories. It's also possible that we'll find some
                # libraries in one directory, and some in another. So the
# obvious thing would be to use a set instead of a list, but I
# don't know if preserving order matters (does it?).
for found_lib in found_libs1:
if found_lib not in found_libs:
found_libs.append(found_lib)
if dir_ not in found_dirs:
found_dirs.append(dir_)
else:
found_libs = self._lib_list(lib_dirs, libs, exts)
found_dirs = [lib_dirs]
if len(found_libs) > 0 and len(found_libs) == len(libs):
info = {'libraries': found_libs, 'library_dirs': found_dirs}
# Now, check for optional libraries
if is_sequence(lib_dirs):
for dir_ in lib_dirs:
opt_found_libs = self._lib_list(dir_, opt_libs, exts)
if opt_found_libs:
if dir_ not in found_dirs:
found_dirs.extend(dir_)
found_libs.extend(opt_found_libs)
else:
opt_found_libs = self._lib_list(lib_dirs, opt_libs, exts)
if opt_found_libs:
found_libs.extend(opt_found_libs)
return info
else:
return None
def combine_paths(self, *args):
"""Return a list of existing paths composed by all combinations
of items from the arguments.
"""
return combine_paths(*args, **{'verbosity': self.verbosity})
class fft_opt_info(system_info):
def calc_info(self):
info = {}
fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')
djbfft_info = get_info('djbfft')
if fftw_info:
dict_append(info, **fftw_info)
if djbfft_info:
dict_append(info, **djbfft_info)
self.set_info(**info)
return
class fftw_info(system_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}]
def calc_ver_info(self, ver_param):
"""Returns True on successful version detection, else False"""
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
incl_dir = None
libs = self.get_libs(self.section + '_libs', ver_param['libs'])
info = self.check_libs(lib_dirs, libs)
if info is not None:
flag = 0
for d in incl_dirs:
if len(self.combine_paths(d, ver_param['includes'])) \
== len(ver_param['includes']):
dict_append(info, include_dirs=[d])
flag = 1
incl_dirs = [d]
break
if flag:
dict_append(info, define_macros=ver_param['macros'])
else:
info = None
if info is not None:
self.set_info(**info)
return True
else:
log.info(' %s not found' % (ver_param['name']))
return False
def calc_info(self):
for i in self.ver_info:
if self.calc_ver_info(i):
break
class fftw2_info(fftw_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}
]
class fftw3_info(fftw_info):
#variables to override
section = 'fftw3'
dir_env_var = 'FFTW3'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
]
class dfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw',
'libs':['drfftw', 'dfftw'],
'includes':['dfftw.h', 'drfftw.h'],
'macros':[('SCIPY_DFFTW_H', None)]}]
class sfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw',
'libs':['srfftw', 'sfftw'],
'includes':['sfftw.h', 'srfftw.h'],
'macros':[('SCIPY_SFFTW_H', None)]}]
class fftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'fftw threads',
'libs':['rfftw_threads', 'fftw_threads'],
'includes':['fftw_threads.h', 'rfftw_threads.h'],
'macros':[('SCIPY_FFTW_THREADS_H', None)]}]
class dfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw threads',
'libs':['drfftw_threads', 'dfftw_threads'],
'includes':['dfftw_threads.h', 'drfftw_threads.h'],
'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]
class sfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw threads',
'libs':['srfftw_threads', 'sfftw_threads'],
'includes':['sfftw_threads.h', 'srfftw_threads.h'],
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]
class djbfft_info(system_info):
section = 'djbfft'
dir_env_var = 'DJBFFT'
notfounderror = DJBFFTNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend(self.combine_paths(d, ['djbfft']) + [d])
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
info = None
for d in lib_dirs:
p = self.combine_paths(d, ['djbfft.a'])
if p:
info = {'extra_objects': p}
break
p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext])
if p:
info = {'libraries': ['djbfft'], 'library_dirs': [d]}
break
if info is None:
return
for d in incl_dirs:
if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2:
dict_append(info, include_dirs=[d],
define_macros=[('SCIPY_DJBFFT_H', None)])
self.set_info(**info)
return
return
class mkl_info(system_info):
section = 'mkl'
dir_env_var = 'MKL'
_lib_mkl = ['mkl', 'vml', 'guide']
def get_mkl_rootdir(self):
mklroot = os.environ.get('MKLROOT', None)
if mklroot is not None:
return mklroot
paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
ld_so_conf = '/etc/ld.so.conf'
if os.path.isfile(ld_so_conf):
for d in open(ld_so_conf, 'r'):
d = d.strip()
if d:
paths.append(d)
intel_mkl_dirs = []
for path in paths:
path_atoms = path.split(os.sep)
for m in path_atoms:
if m.startswith('mkl'):
d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])
intel_mkl_dirs.append(d)
break
for d in paths:
dirs = glob(os.path.join(d, 'mkl', '*'))
dirs += glob(os.path.join(d, 'mkl*'))
for d in dirs:
if os.path.isdir(os.path.join(d, 'lib')):
return d
return None
def __init__(self):
mklroot = self.get_mkl_rootdir()
if mklroot is None:
system_info.__init__(self)
else:
from .cpuinfo import cpu
l = 'mkl' # use shared library
if cpu.is_Itanium():
plt = '64'
#l = 'mkl_ipf'
elif cpu.is_Xeon():
plt = 'intel64'
#l = 'mkl_intel64'
else:
plt = '32'
#l = 'mkl_ia32'
if l not in self._lib_mkl:
self._lib_mkl.insert(0, l)
system_info.__init__(
self,
default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
default_include_dirs=[os.path.join(mklroot, 'include')])
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
mkl_libs = self.get_libs('mkl_libs', self._lib_mkl)
info = self.check_libs2(lib_dirs, mkl_libs)
if info is None:
return
dict_append(info,
define_macros=[('SCIPY_MKL_H', None),
('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
if sys.platform == 'win32':
pass # win32 has no pthread library
else:
dict_append(info, libraries=['pthread'])
self.set_info(**info)
class lapack_mkl_info(mkl_info):
def calc_info(self):
mkl = get_info('mkl')
if not mkl:
return
if sys.platform == 'win32':
lapack_libs = self.get_libs('lapack_libs', ['mkl_lapack'])
else:
lapack_libs = self.get_libs('lapack_libs',
['mkl_lapack32', 'mkl_lapack64'])
info = {'libraries': lapack_libs}
dict_append(info, **mkl)
self.set_info(**info)
class blas_mkl_info(mkl_info):
pass
class atlas_info(system_info):
section = 'atlas'
dir_env_var = 'ATLAS'
_lib_names = ['f77blas', 'cblas']
if sys.platform[:7] == 'freebsd':
_lib_atlas = ['atlas_r']
_lib_lapack = ['alapack_r']
else:
_lib_atlas = ['atlas']
_lib_lapack = ['lapack']
notfounderror = AtlasNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',
'sse', '3dnow', 'sse2']) + [d])
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names + self._lib_atlas)
lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)
atlas = None
lapack = None
atlas_1 = None
for d in lib_dirs:
atlas = self.check_libs2(d, atlas_libs, [])
lapack_atlas = self.check_libs2(d, ['lapack_atlas'], [])
if atlas is not None:
lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
if lapack is not None:
break
if atlas:
atlas_1 = atlas
log.info(self.__class__)
if atlas is None:
atlas = atlas_1
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
if lapack is not None:
dict_append(info, **lapack)
dict_append(info, **atlas)
elif 'lapack_atlas' in atlas['libraries']:
dict_append(info, **atlas)
dict_append(info,
define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])
self.set_info(**info)
return
else:
dict_append(info, **atlas)
dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])
message = """
*********************************************************************
Could not find lapack library within the ATLAS installation.
*********************************************************************
"""
warnings.warn(message)
self.set_info(**info)
return
# Check if lapack library is complete, only warn if it is not.
lapack_dir = lapack['library_dirs'][0]
lapack_name = lapack['libraries'][0]
lapack_lib = None
lib_prefixes = ['lib']
if sys.platform == 'win32':
lib_prefixes.append('')
for e in self.library_extensions():
for prefix in lib_prefixes:
fn = os.path.join(lapack_dir, prefix + lapack_name + e)
if os.path.exists(fn):
lapack_lib = fn
break
if lapack_lib:
break
if lapack_lib is not None:
sz = os.stat(lapack_lib)[6]
if sz <= 4000 * 1024:
message = """
*********************************************************************
Lapack library (from ATLAS) is probably incomplete:
size of %s is %sk (expected >4000k)
Follow the instructions in the KNOWN PROBLEMS section of the file
numpy/INSTALL.txt.
*********************************************************************
""" % (lapack_lib, sz / 1024)
warnings.warn(message)
else:
info['language'] = 'f77'
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(info, **atlas_extra_info)
self.set_info(**info)
class atlas_blas_info(atlas_info):
_lib_names = ['f77blas', 'cblas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names + self._lib_atlas)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
class atlas_threads_info(atlas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['ptf77blas', 'ptcblas']
class atlas_blas_threads_info(atlas_blas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['ptf77blas', 'ptcblas']
class lapack_atlas_info(atlas_info):
_lib_names = ['lapack_atlas'] + atlas_info._lib_names
class lapack_atlas_threads_info(atlas_threads_info):
_lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names
class atlas_3_10_info(atlas_info):
_lib_names = ['satlas']
_lib_atlas = _lib_names
_lib_lapack = _lib_names
class atlas_3_10_blas_info(atlas_3_10_info):
_lib_names = ['satlas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
class atlas_3_10_threads_info(atlas_3_10_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
    #if sys.platform[:7] == 'freebsd':
    ## I don't think FreeBSD supports ATLAS 3.10 at this time (2014)
_lib_atlas = _lib_names
_lib_lapack = _lib_names
class atlas_3_10_blas_threads_info(atlas_3_10_blas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
class lapack_atlas_3_10_info(atlas_3_10_info):
pass
class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):
pass
class lapack_info(system_info):
section = 'lapack'
dir_env_var = 'LAPACK'
_lib_names = ['lapack']
notfounderror = LapackNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
lapack_libs = self.get_libs('lapack_libs', self._lib_names)
info = self.check_libs(lib_dirs, lapack_libs, [])
if info is None:
return
info['language'] = 'f77'
self.set_info(**info)
class lapack_src_info(system_info):
section = 'lapack_src'
dir_env_var = 'LAPACK_SRC'
notfounderror = LapackSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'dgesv.f')):
src_dir = d
break
if not src_dir:
            #XXX: Get sources from netlib. Maybe ask the user first.
return
# The following is extracted from LAPACK-3.0/SRC/Makefile.
# Added missing names from lapack-lite-3.1.1/SRC/Makefile
# while keeping removed names for Lapack-3.0 compatibility.
allaux = '''
ilaenv ieeeck lsame lsamen xerbla
iparmq
''' # *.f
laux = '''
bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
stebz stedc steqr sterf
larra larrc larrd larr larrk larrj larrr laneg laisnan isnan
lazq3 lazq4
''' # [s|d]*.f
lasrc = '''
gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
tzrqf tzrzf
lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5
''' # [s|c|d|z]*.f
sd_lasrc = '''
laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
sygvx sytd2 sytrd
''' # [s|d]*.f
cz_lasrc = '''
bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
''' # [c|z]*.f
#######
sclaux = laux + ' econd ' # s*.f
dzlaux = laux + ' secnd ' # d*.f
slasrc = lasrc + sd_lasrc # s*.f
dlasrc = lasrc + sd_lasrc # d*.f
clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f
zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f
oclasrc = ' icmax1 scsum1 ' # *.f
ozlasrc = ' izmax1 dzsum1 ' # *.f
sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \
+ ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \
+ ['c%s.f' % f for f in (clasrc).split()] \
+ ['z%s.f' % f for f in (zlasrc).split()] \
+ ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]
sources = [os.path.join(src_dir, f) for f in sources]
# Lapack 3.1:
src_dir2 = os.path.join(src_dir, '..', 'INSTALL')
sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']
# Lapack 3.2.1:
sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']
# Should we check here actual existence of source files?
# Yes, the file listing is different between 3.0 and 3.1
# versions.
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
atlas_version_c_text = r'''
/* This file is generated from numpy/distutils/system_info.py */
void ATL_buildinfo(void);
int main(void) {
ATL_buildinfo();
return 0;
}
'''
_cached_atlas_version = {}
def get_atlas_version(**config):
libraries = config.get('libraries', [])
library_dirs = config.get('library_dirs', [])
key = (tuple(libraries), tuple(library_dirs))
if key in _cached_atlas_version:
return _cached_atlas_version[key]
c = cmd_config(Distribution())
atlas_version = None
info = {}
try:
s, o = c.get_output(atlas_version_c_text,
libraries=libraries, library_dirs=library_dirs,
use_tee=(system_info.verbosity > 0))
if s and re.search(r'undefined reference to `_gfortran', o, re.M):
s, o = c.get_output(atlas_version_c_text,
libraries=libraries + ['gfortran'],
library_dirs=library_dirs,
use_tee=(system_info.verbosity > 0))
if not s:
warnings.warn("""
*****************************************************
Linkage with ATLAS requires gfortran. Use
python setup.py config_fc --fcompiler=gnu95 ...
when building extension libraries that use ATLAS.
Make sure that -lgfortran is used for C++ extensions.
*****************************************************
""")
dict_append(info, language='f90',
define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
except Exception: # failed to get version from file -- maybe on Windows
# look at directory name
for o in library_dirs:
m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
if m:
atlas_version = m.group('version')
if atlas_version is not None:
break
# final choice --- look at ATLAS_VERSION environment
# variable
if atlas_version is None:
atlas_version = os.environ.get('ATLAS_VERSION', None)
if atlas_version:
dict_append(info, define_macros=[(
'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
])
else:
dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
return atlas_version or '?.?.?', info
if not s:
m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
if m:
atlas_version = m.group('version')
if atlas_version is None:
if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
atlas_version = '3.2.1_pre3.3.6'
else:
log.info('Status: %d', s)
log.info('Output: %s', o)
if atlas_version == '3.2.1_pre3.3.6':
dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
else:
dict_append(info, define_macros=[(
'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
])
result = _cached_atlas_version[key] = atlas_version, info
return result
class lapack_opt_info(system_info):
notfounderror = LapackNotFoundError
def calc_info(self):
openblas_info = get_info('openblas_lapack')
if openblas_info:
self.set_info(**openblas_info)
return
lapack_mkl_info = get_info('lapack_mkl')
if lapack_mkl_info:
self.set_info(**lapack_mkl_info)
return
atlas_info = get_info('atlas_3_10_threads')
if not atlas_info:
atlas_info = get_info('atlas_3_10')
if not atlas_info:
atlas_info = get_info('atlas_threads')
if not atlas_info:
atlas_info = get_info('atlas')
if sys.platform == 'darwin' and not atlas_info:
# Use the system lapack from Accelerate or vecLib under OSX
args = []
link_args = []
if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
'x86_64' in get_platform() or \
'i386' in platform.platform():
intel = 1
else:
intel = 0
if os.path.exists('/System/Library/Frameworks'
'/Accelerate.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
elif os.path.exists('/System/Library/Frameworks'
'/vecLib.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
if args:
self.set_info(extra_compile_args=args,
extra_link_args=link_args,
define_macros=[('NO_ATLAS_INFO', 3),
('HAVE_CBLAS', None)])
return
#atlas_info = {} ## uncomment for testing
need_lapack = 0
need_blas = 0
info = {}
if atlas_info:
l = atlas_info.get('define_macros', [])
if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
or ('ATLAS_WITHOUT_LAPACK', None) in l:
need_lapack = 1
info = atlas_info
else:
warnings.warn(AtlasNotFoundError.__doc__)
need_blas = 1
need_lapack = 1
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
if need_lapack:
lapack_info = get_info('lapack')
#lapack_info = {} ## uncomment for testing
if lapack_info:
dict_append(info, **lapack_info)
else:
warnings.warn(LapackNotFoundError.__doc__)
lapack_src_info = get_info('lapack_src')
if not lapack_src_info:
warnings.warn(LapackSrcNotFoundError.__doc__)
return
dict_append(info, libraries=[('flapack_src', lapack_src_info)])
if need_blas:
blas_info = get_info('blas')
#blas_info = {} ## uncomment for testing
if blas_info:
dict_append(info, **blas_info)
else:
warnings.warn(BlasNotFoundError.__doc__)
blas_src_info = get_info('blas_src')
if not blas_src_info:
warnings.warn(BlasSrcNotFoundError.__doc__)
return
dict_append(info, libraries=[('fblas_src', blas_src_info)])
self.set_info(**info)
return
class blas_opt_info(system_info):
notfounderror = BlasNotFoundError
def calc_info(self):
blas_mkl_info = get_info('blas_mkl')
if blas_mkl_info:
self.set_info(**blas_mkl_info)
return
openblas_info = get_info('openblas')
if openblas_info:
self.set_info(**openblas_info)
return
atlas_info = get_info('atlas_3_10_blas_threads')
if not atlas_info:
atlas_info = get_info('atlas_3_10_blas')
if not atlas_info:
atlas_info = get_info('atlas_blas_threads')
if not atlas_info:
atlas_info = get_info('atlas_blas')
if sys.platform == 'darwin' and not atlas_info:
# Use the system BLAS from Accelerate or vecLib under OSX
args = []
link_args = []
if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
'x86_64' in get_platform() or \
'i386' in platform.platform():
intel = 1
else:
intel = 0
if os.path.exists('/System/Library/Frameworks'
'/Accelerate.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
elif os.path.exists('/System/Library/Frameworks'
'/vecLib.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
if args:
self.set_info(extra_compile_args=args,
extra_link_args=link_args,
define_macros=[('NO_ATLAS_INFO', 3),
('HAVE_CBLAS', None)])
return
need_blas = 0
info = {}
if atlas_info:
info = atlas_info
else:
warnings.warn(AtlasNotFoundError.__doc__)
need_blas = 1
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
if need_blas:
blas_info = get_info('blas')
if blas_info:
dict_append(info, **blas_info)
else:
warnings.warn(BlasNotFoundError.__doc__)
blas_src_info = get_info('blas_src')
if not blas_src_info:
warnings.warn(BlasSrcNotFoundError.__doc__)
return
dict_append(info, libraries=[('fblas_src', blas_src_info)])
self.set_info(**info)
return
class blas_info(system_info):
section = 'blas'
dir_env_var = 'BLAS'
_lib_names = ['blas']
notfounderror = BlasNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
blas_libs = self.get_libs('blas_libs', self._lib_names)
info = self.check_libs(lib_dirs, blas_libs, [])
if info is None:
return
if platform.system() == 'Windows':
# The check for windows is needed because has_cblas uses the
# same compiler that was used to compile Python and msvc is
# often not installed when mingw is being used. This rough
# treatment is not desirable, but windows is tricky.
info['language'] = 'f77' # XXX: is it generally true?
else:
lib = self.has_cblas(info)
if lib is not None:
info['language'] = 'c'
info['libraries'] = [lib]
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
def has_cblas(self, info):
# primitive cblas check by looking for the header and trying to link
# cblas or blas
res = False
c = distutils.ccompiler.new_compiler()
tmpdir = tempfile.mkdtemp()
s = """#include <cblas.h>
int main(int argc, const char *argv[])
{
double a[4] = {1,2,3,4};
double b[4] = {5,6,7,8};
return cblas_ddot(4, a, 1, b, 1) > 10;
}"""
src = os.path.join(tmpdir, 'source.c')
try:
with open(src, 'wt') as f:
f.write(s)
try:
# check we can compile (find headers)
obj = c.compile([src], output_dir=tmpdir,
include_dirs=self.get_include_dirs())
# check we can link (find library)
# some systems have separate cblas and blas libs. First
# check for cblas lib, and if not present check for blas lib.
try:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
libraries=["cblas"],
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
res = "cblas"
except distutils.ccompiler.LinkError:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
libraries=["blas"],
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
res = "blas"
except distutils.ccompiler.CompileError:
res = None
finally:
shutil.rmtree(tmpdir)
return res
class openblas_info(blas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
notfounderror = BlasNotFoundError
def check_embedded_lapack(self, info):
return True
def calc_info(self):
lib_dirs = self.get_lib_dirs()
openblas_libs = self.get_libs('libraries', self._lib_names)
if openblas_libs == self._lib_names: # backward compat with 1.8.0
openblas_libs = self.get_libs('openblas_libs', self._lib_names)
info = self.check_libs(lib_dirs, openblas_libs, [])
if info is None:
return
# Add extra info for OpenBLAS
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
if not self.check_embedded_lapack(info):
return
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
class openblas_lapack_info(openblas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
notfounderror = BlasNotFoundError
def check_embedded_lapack(self, info):
res = False
c = distutils.ccompiler.new_compiler()
tmpdir = tempfile.mkdtemp()
s = """void zungqr();
int main(int argc, const char *argv[])
{
zungqr_();
return 0;
}"""
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
try:
extra_args = info['extra_link_args']
        except KeyError:
extra_args = []
try:
with open(src, 'wt') as f:
f.write(s)
obj = c.compile([src], output_dir=tmpdir)
try:
c.link_executable(obj, out, libraries=info['libraries'],
library_dirs=info['library_dirs'],
extra_postargs=extra_args)
res = True
except distutils.ccompiler.LinkError:
res = False
finally:
shutil.rmtree(tmpdir)
return res
class blas_src_info(system_info):
section = 'blas_src'
dir_env_var = 'BLAS_SRC'
notfounderror = BlasSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['blas']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'daxpy.f')):
src_dir = d
break
if not src_dir:
            #XXX: Get sources from netlib. Maybe ask the user first.
return
blas1 = '''
caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
scabs1
'''
blas2 = '''
cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
'''
blas3 = '''
cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
'''
sources = [os.path.join(src_dir, f + '.f') \
for f in (blas1 + blas2 + blas3).split()]
#XXX: should we check here actual existence of source files?
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
class x11_info(system_info):
section = 'x11'
notfounderror = X11NotFoundError
def __init__(self):
system_info.__init__(self,
default_lib_dirs=default_x11_lib_dirs,
default_include_dirs=default_x11_include_dirs)
def calc_info(self):
if sys.platform in ['win32']:
return
lib_dirs = self.get_lib_dirs()
include_dirs = self.get_include_dirs()
x11_libs = self.get_libs('x11_libs', ['X11'])
info = self.check_libs(lib_dirs, x11_libs, [])
if info is None:
return
inc_dir = None
for d in include_dirs:
if self.combine_paths(d, 'X11/X.h'):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
self.set_info(**info)
class _numpy_info(system_info):
section = 'Numeric'
modulename = 'Numeric'
notfounderror = NumericNotFoundError
def __init__(self):
include_dirs = []
try:
module = __import__(self.modulename)
prefix = []
for name in module.__file__.split(os.sep):
if name == 'lib':
break
prefix.append(name)
# Ask numpy for its own include path before attempting
# anything else
try:
include_dirs.append(getattr(module, 'get_include')())
except AttributeError:
pass
include_dirs.append(distutils.sysconfig.get_python_inc(
prefix=os.sep.join(prefix)))
except ImportError:
pass
py_incl_dir = distutils.sysconfig.get_python_inc()
include_dirs.append(py_incl_dir)
py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
if py_pincl_dir not in include_dirs:
include_dirs.append(py_pincl_dir)
for d in default_include_dirs:
d = os.path.join(d, os.path.basename(py_incl_dir))
if d not in include_dirs:
include_dirs.append(d)
system_info.__init__(self,
default_lib_dirs=[],
default_include_dirs=include_dirs)
def calc_info(self):
try:
module = __import__(self.modulename)
except ImportError:
return
info = {}
macros = []
for v in ['__version__', 'version']:
vrs = getattr(module, v, None)
if vrs is None:
continue
macros = [(self.modulename.upper() + '_VERSION',
'"\\"%s\\""' % (vrs)),
(self.modulename.upper(), None)]
break
## try:
## macros.append(
## (self.modulename.upper()+'_VERSION_HEX',
## hex(vstr2hex(module.__version__))),
## )
## except Exception as msg:
## print msg
dict_append(info, define_macros=macros)
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
if self.combine_paths(d,
os.path.join(self.modulename,
'arrayobject.h')):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
if info:
self.set_info(**info)
return
class numarray_info(_numpy_info):
section = 'numarray'
modulename = 'numarray'
class Numeric_info(_numpy_info):
section = 'Numeric'
modulename = 'Numeric'
class numpy_info(_numpy_info):
section = 'numpy'
modulename = 'numpy'
class numerix_info(system_info):
section = 'numerix'
def calc_info(self):
which = None, None
if os.getenv("NUMERIX"):
which = os.getenv("NUMERIX"), "environment var"
# If all the above fail, default to numpy.
if which[0] is None:
which = "numpy", "defaulted"
try:
import numpy
which = "numpy", "defaulted"
except ImportError:
msg1 = str(get_exception())
try:
import Numeric
which = "numeric", "defaulted"
except ImportError:
msg2 = str(get_exception())
try:
import numarray
which = "numarray", "defaulted"
except ImportError:
msg3 = str(get_exception())
log.info(msg1)
log.info(msg2)
log.info(msg3)
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric' "
"or 'numarray' or 'numpy' but the value obtained"
" from the %s was '%s'." % (which[1], which[0]))
os.environ['NUMERIX'] = which[0]
self.set_info(**get_info(which[0]))
class f2py_info(system_info):
def calc_info(self):
try:
import numpy.f2py as f2py
except ImportError:
return
f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')
self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],
include_dirs=[f2py_dir])
return
class boost_python_info(system_info):
section = 'boost_python'
dir_env_var = 'BOOST'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['boost*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
'module.cpp')):
src_dir = d
break
if not src_dir:
return
py_incl_dirs = [distutils.sysconfig.get_python_inc()]
py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
if py_pincl_dir not in py_incl_dirs:
py_incl_dirs.append(py_pincl_dir)
srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
info = {'libraries': [('boost_python_src',
{'include_dirs': [src_dir] + py_incl_dirs,
'sources':bpl_srcs}
)],
'include_dirs': [src_dir],
}
if info:
self.set_info(**info)
return
class agg2_info(system_info):
section = 'agg2'
dir_env_var = 'AGG2'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['agg2*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
src_dir = d
break
if not src_dir:
return
if sys.platform == 'win32':
agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',
'win32', 'agg_win32_bmp.cpp'))
else:
agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))
agg2_srcs += [os.path.join(src_dir, 'src', 'platform',
'X11',
'agg_platform_support.cpp')]
info = {'libraries':
[('agg2_src',
{'sources': agg2_srcs,
'include_dirs': [os.path.join(src_dir, 'include')],
}
)],
'include_dirs': [os.path.join(src_dir, 'include')],
}
if info:
self.set_info(**info)
return
class _pkg_config_info(system_info):
section = None
config_env_var = 'PKG_CONFIG'
default_config_exe = 'pkg-config'
append_config_exe = ''
version_macro_name = None
release_macro_name = None
version_flag = '--modversion'
cflags_flag = '--cflags'
def get_config_exe(self):
if self.config_env_var in os.environ:
return os.environ[self.config_env_var]
return self.default_config_exe
def get_config_output(self, config_exe, option):
cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
s, o = exec_command(cmd, use_tee=0)
if not s:
return o
def calc_info(self):
config_exe = find_executable(self.get_config_exe())
if not config_exe:
            log.warn('File not found: %s. Cannot determine %s info.' \
                  % (self.get_config_exe(), self.section))
return
info = {}
macros = []
libraries = []
library_dirs = []
include_dirs = []
extra_link_args = []
extra_compile_args = []
version = self.get_config_output(config_exe, self.version_flag)
if version:
macros.append((self.__class__.__name__.split('.')[-1].upper(),
'"\\"%s\\""' % (version)))
if self.version_macro_name:
macros.append((self.version_macro_name + '_%s'
% (version.replace('.', '_')), None))
if self.release_macro_name:
release = self.get_config_output(config_exe, '--release')
if release:
macros.append((self.release_macro_name + '_%s'
% (release.replace('.', '_')), None))
opts = self.get_config_output(config_exe, '--libs')
if opts:
for opt in opts.split():
if opt[:2] == '-l':
libraries.append(opt[2:])
elif opt[:2] == '-L':
library_dirs.append(opt[2:])
else:
extra_link_args.append(opt)
opts = self.get_config_output(config_exe, self.cflags_flag)
if opts:
for opt in opts.split():
if opt[:2] == '-I':
include_dirs.append(opt[2:])
elif opt[:2] == '-D':
if '=' in opt:
n, v = opt[2:].split('=')
macros.append((n, v))
else:
macros.append((opt[2:], None))
else:
extra_compile_args.append(opt)
if macros:
dict_append(info, define_macros=macros)
if libraries:
dict_append(info, libraries=libraries)
if library_dirs:
dict_append(info, library_dirs=library_dirs)
if include_dirs:
dict_append(info, include_dirs=include_dirs)
if extra_link_args:
dict_append(info, extra_link_args=extra_link_args)
if extra_compile_args:
dict_append(info, extra_compile_args=extra_compile_args)
if info:
self.set_info(**info)
return
class wx_info(_pkg_config_info):
section = 'wx'
config_env_var = 'WX_CONFIG'
default_config_exe = 'wx-config'
append_config_exe = ''
version_macro_name = 'WX_VERSION'
release_macro_name = 'WX_RELEASE'
version_flag = '--version'
cflags_flag = '--cxxflags'
class gdk_pixbuf_xlib_2_info(_pkg_config_info):
section = 'gdk_pixbuf_xlib_2'
append_config_exe = 'gdk-pixbuf-xlib-2.0'
version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
class gdk_pixbuf_2_info(_pkg_config_info):
section = 'gdk_pixbuf_2'
append_config_exe = 'gdk-pixbuf-2.0'
version_macro_name = 'GDK_PIXBUF_VERSION'
class gdk_x11_2_info(_pkg_config_info):
section = 'gdk_x11_2'
append_config_exe = 'gdk-x11-2.0'
version_macro_name = 'GDK_X11_VERSION'
class gdk_2_info(_pkg_config_info):
section = 'gdk_2'
append_config_exe = 'gdk-2.0'
version_macro_name = 'GDK_VERSION'
class gdk_info(_pkg_config_info):
section = 'gdk'
append_config_exe = 'gdk'
version_macro_name = 'GDK_VERSION'
class gtkp_x11_2_info(_pkg_config_info):
section = 'gtkp_x11_2'
append_config_exe = 'gtk+-x11-2.0'
version_macro_name = 'GTK_X11_VERSION'
class gtkp_2_info(_pkg_config_info):
section = 'gtkp_2'
append_config_exe = 'gtk+-2.0'
version_macro_name = 'GTK_VERSION'
class xft_info(_pkg_config_info):
section = 'xft'
append_config_exe = 'xft'
version_macro_name = 'XFT_VERSION'
class freetype2_info(_pkg_config_info):
section = 'freetype2'
append_config_exe = 'freetype2'
version_macro_name = 'FREETYPE2_VERSION'
class amd_info(system_info):
section = 'amd'
dir_env_var = 'AMD'
_lib_names = ['amd']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
amd_libs = self.get_libs('amd_libs', self._lib_names)
info = self.check_libs(lib_dirs, amd_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, 'amd.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_AMD_H', None)],
swig_opts=['-I' + inc_dir])
self.set_info(**info)
return
class umfpack_info(system_info):
section = 'umfpack'
dir_env_var = 'UMFPACK'
notfounderror = UmfpackNotFoundError
_lib_names = ['umfpack']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
umfpack_libs = self.get_libs('umfpack_libs', self._lib_names)
info = self.check_libs(lib_dirs, umfpack_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_UMFPACK_H', None)],
swig_opts=['-I' + inc_dir])
amd = get_info('amd')
        dict_append(info, **amd)
self.set_info(**info)
return
## def vstr2hex(version):
## bits = []
## n = [24,16,8,4,0]
## r = 0
## for s in version.split('.'):
## r |= int(s) << n[0]
## del n[0]
## return r
#--------------------------------------------------------------------
def combine_paths(*args, **kws):
""" Return a list of existing paths composed by all combinations of
items from arguments.
"""
r = []
for a in args:
if not a:
continue
if is_string(a):
a = [a]
r.append(a)
args = r
if not args:
return []
if len(args) == 1:
result = reduce(lambda a, b: a + b, map(glob, args[0]), [])
elif len(args) == 2:
result = []
for a0 in args[0]:
for a1 in args[1]:
result.extend(glob(os.path.join(a0, a1)))
else:
result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))
verbosity = kws.get('verbosity', 1)
log.debug('(paths: %s)', ','.join(result))
return result
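# Illustrative example (added for clarity; not in the original source):
# combine_paths('/usr', ['lib', 'lib64']) globs '/usr/lib' and '/usr/lib64'
# and returns only the paths that actually exist on this machine.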
language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}
inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}
def dict_append(d, **kws):
languages = []
for k, v in kws.items():
if k == 'language':
languages.append(v)
continue
if k in d:
if k in ['library_dirs', 'include_dirs',
'extra_compile_args', 'extra_link_args',
'runtime_library_dirs', 'define_macros']:
[d[k].append(vv) for vv in v if vv not in d[k]]
else:
d[k].extend(v)
else:
d[k] = v
if languages:
l = inv_language_map[max([language_map.get(l, 0) for l in languages])]
d['language'] = l
return
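# Illustrative example (added; not in the original source). dict_append
# merges list-valued keys and keeps the "highest" language seen:
#   d = {'libraries': ['blas']}
#   dict_append(d, libraries=['cblas'], language='c')
#   # d == {'libraries': ['blas', 'cblas'], 'language': 'c'}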
def parseCmdLine(argv=(None,)):
import optparse
parser = optparse.OptionParser("usage: %prog [-v] [info objs]")
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False,
help='be verbose and print more messages')
opts, args = parser.parse_args(args=argv[1:])
return opts, args
def show_all(argv=None):
import inspect
if argv is None:
argv = sys.argv
opts, args = parseCmdLine(argv)
if opts.verbose:
log.set_threshold(log.DEBUG)
else:
log.set_threshold(log.INFO)
show_only = []
for n in args:
if n[-5:] != '_info':
n = n + '_info'
show_only.append(n)
show_all = not show_only
_gdict_ = globals().copy()
for name, c in _gdict_.items():
if not inspect.isclass(c):
continue
if not issubclass(c, system_info) or c is system_info:
continue
if not show_all:
if name not in show_only:
continue
del show_only[show_only.index(name)]
conf = c()
conf.verbosity = 2
r = conf.get_info()
if show_only:
log.info('Info classes not defined: %s', ','.join(show_only))
if __name__ == "__main__":
show_all()
|
[
"master@MacBook-Pro-admin.local"
] |
master@MacBook-Pro-admin.local
|
b20a8ceb62e68cea4660e241d323d08b5c8a9a34
|
b05b89e1f6378905bbb62e2a2bf2d4f8e3187932
|
/contiguousSubarrayWithMaxSum.py
|
cca7da339ae673798a2108e9eca5e36101113136
|
[
"MIT"
] |
permissive
|
anishmo99/Daily-Interview-Pro
|
c959cd336209132aebad67a409df685e654cfdfc
|
d8724e8feec558ab1882d22c9ca63b850b767753
|
refs/heads/master
| 2023-04-10T08:09:46.089227
| 2021-04-27T07:27:38
| 2021-04-27T07:27:38
| 269,157,996
| 1
| 1
|
MIT
| 2020-06-08T07:09:19
| 2020-06-03T17:57:21
|
C++
|
UTF-8
|
Python
| false
| false
| 257
|
py
|
from typing import List  # needed for the List[int] annotation


class Solution:
    def maxSubArraySum(self, arr: List[int]) -> int:
        cur_sum, max_sum = arr[0], arr[0]
        for i in range(1, len(arr)):
            cur_sum = max(arr[i], arr[i] + cur_sum)
            max_sum = max(cur_sum, max_sum)
        return max_sum
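

# Hedged usage sketch (added for illustration; not part of the original file).
# Kadane's algorithm: cur_sum is the best sum of a subarray ending at i,
# max_sum the best seen anywhere. Expected output below is 6 ([4, -1, 2, 1]).
if __name__ == "__main__":
    print(Solution().maxSubArraySum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))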
|
[
"ani10sh@gmail.com"
] |
ani10sh@gmail.com
|
19ced467669fb0dacbfd256cd91ac39a5fbd1a6d
|
78b961adcc7f2cc8c1e95a88a46220c94c1de85d
|
/esp32-micropython/server/util.py
|
58909bdeec47caa2dc2af41d5aa22bab04406cf4
|
[] |
no_license
|
camlee/power-meter
|
3b685ca7df26c774c8e946271a89b82d75c0b145
|
4954198e863de30d32af927da2cec6767e3681f9
|
refs/heads/master
| 2022-12-24T23:32:20.400866
| 2021-07-12T21:41:57
| 2021-07-12T21:41:57
| 144,101,312
| 4
| 1
| null | 2022-12-10T13:59:48
| 2018-08-09T04:37:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,330
|
py
|
import os
import time
import machine
EPOCH_DELTA = 946684800 # (date(2000, 1, 1) - date(1970, 1, 1)).days * 24*60*60
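# Sanity check of the constant (added note): 1970..1999 spans 30 years, 7 of
# them leap (1972, '76, '80, '84, '88, '92, '96), so
# (30 * 365 + 7) * 24 * 60 * 60 == 946684800.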
def set_time_from_epoch(epoch):
"""
Sets the system time from the provided unix epoch time
"""
# converting from unix epoch to ours (2000-01-01 00:00:00 UTC):
our_epoch_time = int(epoch) - EPOCH_DELTA
tm = time.localtime(our_epoch_time)
tm = tm[0:3] + (0,) + tm[3:6] + (0,)
machine.RTC().datetime(tm)
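# Illustrative usage (added; the timestamp is arbitrary):
#   set_time_from_epoch(1600000000)  # sets the RTC to 2020-09-13 12:26:40 UTC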
def epoch_time():
"""
Returns the current system time as the unix epoch.
"""
return time.time() + EPOCH_DELTA
def file_size(path, exclude=[]):
"""
Returns the number of bytes in the file or directory specified by path.
    Recursively checks all subdirectories. Optionally, omits the directories
specified in exclude.
"""
path = path.rstrip("/")
try:
stats = os.stat(path)
except OSError:
return 0 # Files that don't exist don't take up any space
if stats[0] & 0o040000: # Is a directory
total_size = 0
for file in os.listdir(path):
subpath = "%s/%s" % (path, file)
if subpath in exclude:
continue
total_size += file_size(subpath, exclude)
return total_size
else:
return stats[6]
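# Illustrative usage (added; the paths are hypothetical):
#   file_size("/", exclude=["/static"])  # total bytes on /, skipping /static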
|
[
"cam.w.lee@gmail.com"
] |
cam.w.lee@gmail.com
|
b032182e1d6228441cafe14eee800546baf0fee8
|
10a764666e3e520596fb057946d3031070cf5144
|
/news_list/admin.py
|
24c10d96a3cf2056722797f1f06279f950738cc1
|
[] |
no_license
|
HunterOut/News
|
097c01d7469a442f3965bc66aa6274dc84e85a78
|
82ce672ee7225db5a4bea35b50978039f1a55a9a
|
refs/heads/master
| 2023-08-15T01:59:58.861407
| 2020-07-07T17:50:44
| 2020-07-07T17:50:44
| 268,179,514
| 0
| 0
| null | 2021-09-22T19:07:42
| 2020-05-30T23:57:54
|
Python
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
from django.contrib import admin
from .models import Post, Comment
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'author', 'publish', 'status')
list_filter = ('status', 'created', 'publish', 'author')
search_fields = ('title', 'body')
prepopulated_fields = {'slug': ('title',)}
raw_id_fields = ('author',)
date_hierarchy = 'publish'
ordering = ('status', 'publish')
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ('name', 'post', 'created')
|
[
"hunter_out@icloud.com"
] |
hunter_out@icloud.com
|
33105419cbada6eb8b54a848f81125e149dbce73
|
274532350bbdb855d700caae4d7771f07aebd55b
|
/Python/modules/regular expression/first.py
|
02eff6b9ce3992458d540957e2aa68a7fe4e0300
|
[] |
no_license
|
SA253/python-assignment
|
3ef86b5578e90a44c9762c0750aa9e523c01d33c
|
7a491557a6d4123778213b2acfd303becec0b297
|
refs/heads/master
| 2020-09-21T11:25:09.638484
| 2019-11-29T04:04:05
| 2019-11-29T04:04:05
| 224,191,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
import re
pattern="zomato"
#text="does this text match the pattern?" #ans:none ,not found
text="zomatonn" # ans: <re.Match object; span=(0, 6), match='zomato'> , found match
print(re.search(pattern,text))
if re.search(pattern,text):
print("found match")
else:
print("not found")
#type2 (gives only the first occurrence of the word) ans: 5 9 this
pattern="t[a-z]*"
text="does this text match the pattern?"
match=re.search(pattern,text)
if match:
print("found match")
print(match.start(),match.end(),text[match.start():match.end()])
else:
print("not found")
#type3 (gives all the occurrences of the word)
"""ans: 5 9 this
5 9 this
10 14 text
17 20 tch
21 24 the
27 32 ttern"""
pattern="t[a-z]*"
text="does this text match the pattern?"
matches=re.finditer(pattern,text)
for match in matches:
print(match.start(),match.end(),text[match.start():match.end()])
#combining two patterns
"""ans:
0 3 tom
8 13 this
13 18 text
24 28 the"""
pattern="\st[a-z]*|^t[a-z]*" #\s is space before t only will be considered ,
text="Tom,does this text match the pattern?"
matches=re.findall(pattern,text,re.I)  # re.I ignores case, so both T and t match
print(matches)
|
[
"noreply@github.com"
] |
SA253.noreply@github.com
|
a84288226cbea66dd69ddc0696fb2f6421d6bdfb
|
c7071fe677a0cbea69fc2b4d9ad293119511b353
|
/comment/migrations/0001_initial.py
|
79034a6fa78c5a2e0454496ea40b61cccccf489c
|
[] |
no_license
|
jimmyliaoviva/MuseumDjango
|
3465fb83a88b771456a60e3c29cc6c45916c9450
|
663573932965cb1f46e42646ad9cfa98c4129a8c
|
refs/heads/main
| 2023-02-13T19:08:40.460158
| 2021-01-09T07:02:17
| 2021-01-09T07:02:17
| 306,344,255
| 0
| 0
| null | 2020-11-04T09:35:56
| 2020-10-22T13:21:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 933
|
py
|
# Generated by Django 3.1.2 on 2020-11-18 01:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0003_delete_comment'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('commentid', models.AutoField(primary_key=True, serialize=False)),
('comment', models.CharField(max_length=1500)),
('commenttime', models.DateTimeField(auto_now=True)),
('museum', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.museum')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"u0524079@nkfust.edu.tw"
] |
u0524079@nkfust.edu.tw
|
a6eb631441ab06d660845be6883cdca77ab775a3
|
7ae58b6286dde0e3949cc21125a56333443801b6
|
/blog/migrations/0001_initial.py
|
f65c2415be772424b90845c2cfe275ade0f9fece
|
[] |
no_license
|
divsingh14/portfolio-django
|
60a68013cd273fb3b88f7009b208d79e51c5fc53
|
e0fb897981c977af822fd56c5940cb2513419fd4
|
refs/heads/master
| 2022-11-13T10:19:17.020592
| 2020-07-01T15:39:34
| 2020-07-01T15:39:34
| 276,386,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
# Generated by Django 3.0.7 on 2020-06-30 16:53
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('pubDate', models.DateTimeField()),
('body', models.TextField()),
('image', models.ImageField(upload_to='images/')),
],
),
]
|
[
"divsingh14@yahoo.in"
] |
divsingh14@yahoo.in
|