blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3b1236c2af033089a675439e47f130b4baec41 | dd573ed68682fd07da08143dd09f6d2324f51345 | /swea/모의SW역량테스트/5658_보물상자비밀번호.py | 0d31a6192d7541eaeb1e770c774b0ca5c24ab353 | [] | no_license | chelseashin/My-Algorithm | 0f9fb37ea5c6475e8ff6943a5fdaa46f0cd8be61 | db692e158ebed2d607855c8e554fd291c18acb42 | refs/heads/master | 2021-08-06T12:05:23.155679 | 2021-07-04T05:07:43 | 2021-07-04T05:07:43 | 204,362,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | import sys
sys.stdin = open('5658_input.txt')
from collections import deque
T = int(input())
for tc in range(T):
    # N: number of hex digits around the box rim; K: 1-based rank (from the
    # largest) of the password value to report.
    N, K = map(int, input().split())
    # Keep the ring of digits in a deque so the whole rim can be rotated cheaply.
    S = deque(input())
    # Each of the 4 sides of the square box holds n digits.
    n = N//4
    # Distinct candidate passwords (as integers parsed from hex).
    numbers = set()
    # Examine every rotated state of the ring (translated from Korean: "rotated state").
    for _ in range(N//4):
        # Split the current rotation into 4 side-strings of n hex digits each.
        for i in range(0, N, n):
            temp = ''
            for j in range(n):
                temp += S[i+j]
            numbers.add(int(temp, 16))
        S.rotate()
print("#{} {}".format(tc+1, sorted(numbers)[::-1][K-1]))
# 다른 풀이
# T = int(input())
# for test_case in range(T):
# N, K = map(int, input().split())
# data = input() * 2
# ans = set()
# for i in range(N // 4):
# for j in range(1, 5):
# ans.add(int(data[i + (N // 4) * (j - 1) : i + (N // 4) * j], 16))
# # print(sorted(ans, reverse=True))
# print("#{} {}".format(test_case + 1, sorted(ans)[:: - 1][K - 1])) | [
"chaewonshin95@gmail.com"
] | chaewonshin95@gmail.com |
3d99b1f9a252500cbbfbbfb578d4f1b8692adbd4 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Tutorials/Google API's Python Client/expandsymlinks.py | 28b3c662846a89d685f571c65fdc7a1e5d5cebcb | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:5f1f470f54b639699e0364110137529c5a098bae64631d265ea1ec985e10485a
size 1756
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
74fb966098d31d5bd5b87dddb2f7208224027dee | 6a44e772dfdec969f5e2af430f0bf3a35eb73c4e | /src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyComplex/autorestcomplextestservice/models/int_wrapper.py | fc276dcc05f4904cd2f0ccc34b7f154b9521e849 | [
"MIT"
] | permissive | lurumad/autorest | ecc4b1de223e4b4cdd226a3cf922a6940dbddd34 | fef0c4c9e7fdb5c851bdb095d5a2ff93572d452e | refs/heads/master | 2021-01-12T11:07:39.298341 | 2016-11-04T03:12:08 | 2016-11-04T03:12:08 | 72,835,570 | 1 | 0 | null | 2016-11-04T09:58:50 | 2016-11-04T09:58:50 | null | UTF-8 | Python | false | false | 925 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IntWrapper(Model):
"""IntWrapper.
:param field1:
:type field1: int
:param field2:
:type field2: int
"""
_attribute_map = {
'field1': {'key': 'field1', 'type': 'int'},
'field2': {'key': 'field2', 'type': 'int'},
}
def __init__(self, field1=None, field2=None):
self.field1 = field1
self.field2 = field2
| [
"noreply@github.com"
] | lurumad.noreply@github.com |
fe63ce9389ff63768465358dbdd8a16ac4f05c7f | ea85e903db500eee66fe70ed3029b05577494d9d | /排序/349. 两个数组的交集.py | 40954a68011ddec065523c8ae1fe288574149fd7 | [] | no_license | baolibin/leetcode | fcd975eb23e5ca3fc7febbd6c47ec833595b5a51 | bc0540ec42131439be144cca19f6355a01de992a | refs/heads/master | 2021-08-15T20:40:25.580955 | 2021-01-20T09:57:21 | 2021-01-20T09:57:21 | 76,557,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | # coding:utf-8
'''
349. 两个数组的交集
给定两个数组,编写一个函数来计算它们的交集。
示例 1:
输入:nums1 = [1,2,2,1], nums2 = [2,2]
输出:[2]
示例 2:
输入:nums1 = [4,9,5], nums2 = [9,4,9,8,4]
输出:[9,4]
说明:
输出结果中的每个元素一定是唯一的。
我们可以不考虑输出结果的顺序。
'''
def intersection(nums1, nums2):
    """Return the distinct values present in both lists.

    Order of the result is unspecified (as in the original implementation,
    which iterated over an arbitrary-ordered ``set``).

    Uses set intersection, which is O(len(nums1) + len(nums2)); the original
    scanned a list for every element, making it O(n * m).
    """
    return list(set(nums1) & set(nums2))
# Ad-hoc driver. NOTE(review): the first pair of assignments is immediately
# overwritten, so only the second example ([4, 9, 5] vs [9, 4, 9, 8, 4]) is
# actually exercised here.
nums1 = [1, 2, 2, 1]
nums2 = [2, 2]
nums1 = [4, 9, 5]
nums2 = [9, 4, 9, 8, 4]
print(intersection(nums1, nums2))
class Solution:
    def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Return the distinct values common to nums1 and nums2.

        Order of the result is unspecified. Set intersection runs in
        O(len(nums1) + len(nums2)); the original list-membership scan
        was O(n * m).
        """
        return list(set(nums1) & set(nums2))
| [
"yangfengling@inttech.cn"
] | yangfengling@inttech.cn |
b0c71791df4278ee8b534d877f78f47e8c7b4ad7 | 7889f7f0532db6a7f81e6f8630e399c90438b2b9 | /3.4.0/_downloads/62c71faf74da9063a3e8cccd6bfdbed9/boxplot_demo.py | d5eb08f9cf1d9be597338a1e963c023ea4f694ef | [] | no_license | matplotlib/matplotlib.github.com | ef5d23a5bf77cb5af675f1a8273d641e410b2560 | 2a60d39490941a524e5385670d488c86083a032c | refs/heads/main | 2023-08-16T18:46:58.934777 | 2023-08-10T05:07:57 | 2023-08-10T05:08:30 | 1,385,150 | 25 | 59 | null | 2023-08-30T15:59:50 | 2011-02-19T03:27:35 | null | UTF-8 | Python | false | false | 7,870 | py | """
========
Boxplots
========
Visualizing boxplots with matplotlib.
The following examples show off how to visualize boxplots with
Matplotlib. There are many options to control their appearance and
the statistics that they use to summarize the data.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Polygon
# Fixing random state for reproducibility
np.random.seed(19680801)
# fake up some data
spread = np.random.rand(50) * 100
center = np.ones(25) * 50
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
data = np.concatenate((spread, center, flier_high, flier_low))
fig, axs = plt.subplots(2, 3)
# basic plot
axs[0, 0].boxplot(data)
axs[0, 0].set_title('basic plot')
# notched plot
axs[0, 1].boxplot(data, 1)
axs[0, 1].set_title('notched plot')
# change outlier point symbols
axs[0, 2].boxplot(data, 0, 'gD')
axs[0, 2].set_title('change outlier\npoint symbols')
# don't show outlier points
axs[1, 0].boxplot(data, 0, '')
axs[1, 0].set_title("don't show\noutlier points")
# horizontal boxes
axs[1, 1].boxplot(data, 0, 'rs', 0)
axs[1, 1].set_title('horizontal boxes')
# change whisker length
axs[1, 2].boxplot(data, 0, 'rs', 0, 0.75)
axs[1, 2].set_title('change whisker length')
fig.subplots_adjust(left=0.08, right=0.98, bottom=0.05, top=0.9,
hspace=0.4, wspace=0.3)
# fake up some more data
spread = np.random.rand(50) * 100
center = np.ones(25) * 40
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
d2 = np.concatenate((spread, center, flier_high, flier_low))
# Making a 2-D array only works if all the columns are the
# same length. If they are not, then use a list instead.
# This is actually more efficient because boxplot converts
# a 2-D array into a list of vectors internally anyway.
data = [data, d2, d2[::2]]
# Multiple box plots on one Axes
fig, ax = plt.subplots()
ax.boxplot(data)
plt.show()
###############################################################################
# Below we'll generate data from five different probability distributions,
# each with different characteristics. We want to play with how an IID
# bootstrap resample of the data preserves the distributional
# properties of the original sample, and a boxplot is one visual tool
# to make this assessment
random_dists = ['Normal(1, 1)', 'Lognormal(1, 1)', 'Exp(1)', 'Gumbel(6, 4)',
'Triangular(2, 9, 11)']
N = 500
norm = np.random.normal(1, 1, N)
logn = np.random.lognormal(1, 1, N)
expo = np.random.exponential(1, N)
gumb = np.random.gumbel(6, 4, N)
tria = np.random.triangular(2, 9, 11, N)
# Generate some random indices that we'll use to resample the original data
# arrays. For code brevity, just use the same random indices for each array
bootstrap_indices = np.random.randint(0, N, N)
data = [
norm, norm[bootstrap_indices],
logn, logn[bootstrap_indices],
expo, expo[bootstrap_indices],
gumb, gumb[bootstrap_indices],
tria, tria[bootstrap_indices],
]
fig, ax1 = plt.subplots(figsize=(10, 6))
fig.canvas.manager.set_window_title('A Boxplot Example')
fig.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = ax1.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set(
axisbelow=True, # Hide the grid behind plot objects
title='Comparison of IID Bootstrap Resampling Across Five Distributions',
xlabel='Distribution',
ylabel='Value',
)
# Now fill the boxes with desired colors
box_colors = ['darkkhaki', 'royalblue']
num_boxes = len(data)
medians = np.empty(num_boxes)
for i in range(num_boxes):
box = bp['boxes'][i]
box_x = []
box_y = []
for j in range(5):
box_x.append(box.get_xdata()[j])
box_y.append(box.get_ydata()[j])
box_coords = np.column_stack([box_x, box_y])
# Alternate between Dark Khaki and Royal Blue
ax1.add_patch(Polygon(box_coords, facecolor=box_colors[i % 2]))
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
median_x = []
median_y = []
for j in range(2):
median_x.append(med.get_xdata()[j])
median_y.append(med.get_ydata()[j])
ax1.plot(median_x, median_y, 'k')
medians[i] = median_y[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
ax1.plot(np.average(med.get_xdata()), np.average(data[i]),
color='w', marker='*', markeredgecolor='k')
# Set the axes ranges and axes labels
ax1.set_xlim(0.5, num_boxes + 0.5)
top = 40
bottom = -5
ax1.set_ylim(bottom, top)
ax1.set_xticklabels(np.repeat(random_dists, 2),
rotation=45, fontsize=8)
# Due to the Y-axis scale being different across samples, it can be
# hard to compare differences in medians across the samples. Add upper
# X-axis tick labels with the sample medians to aid in comparison
# (just use two decimal places of precision)
pos = np.arange(num_boxes) + 1
upper_labels = [str(round(s, 2)) for s in medians]
weights = ['bold', 'semibold']
for tick, label in zip(range(num_boxes), ax1.get_xticklabels()):
k = tick % 2
ax1.text(pos[tick], .95, upper_labels[tick],
transform=ax1.get_xaxis_transform(),
horizontalalignment='center', size='x-small',
weight=weights[k], color=box_colors[k])
# Finally, add a basic legend
fig.text(0.80, 0.08, f'{N} Random Numbers',
backgroundcolor=box_colors[0], color='black', weight='roman',
size='x-small')
fig.text(0.80, 0.045, 'IID Bootstrap Resample',
backgroundcolor=box_colors[1],
color='white', weight='roman', size='x-small')
fig.text(0.80, 0.015, '*', color='white', backgroundcolor='silver',
weight='roman', size='medium')
fig.text(0.815, 0.013, ' Average Value', color='black', weight='roman',
size='x-small')
plt.show()
###############################################################################
# Here we write a custom function to bootstrap confidence intervals.
# We can then use the boxplot along with this function to show these intervals.
def fake_bootstrapper(n):
    """
    Stand-in for the user's own bootstrap routine: hand back a canned
    median and confidence interval as a ``(median, (lo, hi))`` tuple.
    """
    if n == 1:
        return 0.1, (-0.25, 0.25)
    return 0.2, (-0.35, 0.50)
# Four synthetic "treatments" with gradually increasing spread.
inc = 0.1
e1 = np.random.normal(0, 1, size=500)
e2 = np.random.normal(0, 1, size=500)
e3 = np.random.normal(0, 1 + inc, size=500)
e4 = np.random.normal(0, 1 + 2*inc, size=500)
treatments = [e1, e2, e3, e4]
med1, ci1 = fake_bootstrapper(1)
med2, ci2 = fake_bootstrapper(2)
# Override the statistics for the LAST two boxes only; the leading ``None``
# entries leave the first two boxes to matplotlib's own computation.
medians = [None, None, med1, med2]
conf_intervals = [None, None, ci1, ci2]
fig, ax = plt.subplots()
# 1-based x positions, one per treatment.
pos = np.arange(len(treatments)) + 1
bp = ax.boxplot(treatments, sym='k+', positions=pos,
                notch=1, bootstrap=5000,
                usermedians=medians,
                conf_intervals=conf_intervals)
ax.set_xlabel('treatment')
ax.set_ylabel('response')
plt.setp(bp['whiskers'], color='k', linestyle='-')
plt.setp(bp['fliers'], markersize=3.0)
plt.show()
#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions and methods is shown in this example:
import matplotlib
matplotlib.axes.Axes.boxplot
matplotlib.pyplot.boxplot
matplotlib.axes.Axes.set
| [
"quantum.analyst@gmail.com"
] | quantum.analyst@gmail.com |
1b5d14da025a52cca9cb61629495dbd0444d9afa | d0cb58e1658d4b5b88bdc07e497dc8092707ae02 | /2020/01january/14.py | a3f2553dce31bd97e8db5f46114b464bdc4eb538 | [] | no_license | June-fu/python365 | 27f9b753d38ade549d59aa8f2d8bda0fb8b1e20c | 242033a4b644a7566fbfa4dba9b60f60aa31fe91 | refs/heads/master | 2021-07-02T21:42:28.454091 | 2021-05-04T15:08:44 | 2021-05-04T15:08:44 | 233,629,713 | 0 | 0 | null | 2020-01-13T15:52:58 | 2020-01-13T15:36:53 | null | UTF-8 | Python | false | false | 823 | py | # -*- coding: utf-8 -*-
# Author: june-fu
# Date : 2020/1/27
"""
python-practice-book 12
Write a function group(list, size) that take a list and splits into smaller lists of given size.
input: group([1, 2, 3, 4, 5, 6, 7, 8, 9], 4)
output: [[1, 2, 3, 4], [5, 6, 7, 8], [9]]
"""
def group(list1, size):
    """Split list1 into consecutive chunks of at most ``size`` elements.

    The final chunk holds the remainder when ``len(list1)`` is not a
    multiple of ``size``; an empty input yields an empty list.

    >>> group([1, 2, 3, 4, 5, 6, 7, 8, 9], 4)
    [[1, 2, 3, 4], [5, 6, 7, 8], [9]]
    """
    # Ceiling division replaces the original modulo test + branch.
    chunk_count = (len(list1) + size - 1) // size
    return [list1[i * size:(i + 1) * size] for i in range(chunk_count)]
# use slice
def group1(list1, size):
    """Split list1 into consecutive chunks of at most ``size`` elements,
    stepping through the list slice by slice."""
    chunks = []
    for start in range(0, len(list1), size):
        chunks.append(list1[start:start + size])
    return chunks
if __name__ == '__main__':
    # Demo: both implementations print [[1, 2, 3, 4], [5, 6, 7, 8], [9]].
    print(group([1, 2, 3, 4, 5, 6, 7, 8, 9], 4))
    print(group1([1, 2, 3, 4, 5, 6, 7, 8, 9], 4))
| [
"fujun1990@gmail.com"
] | fujun1990@gmail.com |
af7900ac95be793c451bd2cf4e983dfefb9c666a | bb959d621b83ec2c36d0071dd48bc20942c0dd84 | /apps/users/forms.py | 456ba979e88254bf1317fed9a3c4b540b56701e5 | [
"BSD-3-Clause"
] | permissive | clincher/django-base-template | 6dac3b67db80b2e6336698f3ed60c8ef85dd974f | 560263bf0bb9737f89ae06e391b3d730f102046b | refs/heads/master | 2021-01-18T08:30:27.286465 | 2014-07-19T09:51:27 | 2014-07-19T09:51:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,972 | py | # -*- coding: utf-8 -*-
from django.forms import ModelForm
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.models import UserManager
from braces.views import FormValidMessageMixin
class UserForm(FormValidMessageMixin, ModelForm):
    """Profile-edit form for the active user model.

    Credentials and bookkeeping columns are excluded in Meta, leaving only
    the editable profile attributes on the form.
    """
    # Flash message rendered by braces' FormValidMessageMixin after a save.
    form_valid_message = _(u"Account updated!")
    # NOTE(review): not a Django/braces built-in — presumably consumed by the
    # project's views as a redirect target name; confirm against callers.
    success_list_url = "user_update"
    class Meta:
        model = get_user_model()
        exclude = [
            'password',
            'email',
            'is_staff',
            'is_active',
            'date_joined',
            'last_login',
            'groups',
            'is_superuser',
            'user_permissions'
        ]
class CleanEmailMixin:
    """Form mixin that normalizes the submitted ``email`` field on clean."""

    def clean_email(self):
        # Delegate to Django's canonical normalization (lowercases the
        # domain part of the address).
        submitted = self.cleaned_data["email"]
        return UserManager.normalize_email(submitted)
class EmailAuthenticateForm(CleanEmailMixin, forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    email = forms.EmailField(max_length=255)
    password = forms.CharField(label=_("Пароль"), widget=forms.PasswordInput)
    # Runtime user-facing strings, deliberately left in Russian:
    # invalid_login = "please check the email/password; both are case-sensitive",
    # inactive = "the account is not activated".
    error_messages = {
        'invalid_login': _("Пожалуйста, проверьте правильность ввода "
                           "электронной почты и пароля. "
                           "Обратите внимание на то, что оба поля "
                           "чувствительны к регистру."),
        'no_cookies': _("Your Web browser doesn't appear to have cookies "
                        "enabled. Cookies are required for logging in."),
        'inactive': _("Акаунт не активирован."),
    }
    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        # Cache of the authenticated user, populated by clean().
        self.user_cache = None
        super(EmailAuthenticateForm, self).__init__(*args, **kwargs)
        # Set the label for the "username" field.
        UserModel = get_user_model()
        # NOTE(review): relies on the user model exposing an EMAIL_FIELD
        # attribute (stock on AbstractBaseUser only in Django >= 1.11, or on a
        # custom model) — confirm against the project's user model.
        email_field = UserModel._meta.get_field(UserModel.EMAIL_FIELD)
        self.fields[UserModel.EMAIL_FIELD].label = capfirst(
            email_field.verbose_name)
    def clean(self):
        """Authenticate the email/password pair and cache the resulting user."""
        email = self.cleaned_data.get('email')
        password = self.cleaned_data.get('password')
        if email and password:
            # Delegates credential checking to the configured auth backends.
            self.user_cache = authenticate(email=email, password=password)
            if self.user_cache is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'])
            elif not self.user_cache.is_active:
                raise forms.ValidationError(self.error_messages['inactive'])
        self.check_for_test_cookie()
        return self.cleaned_data
    def check_for_test_cookie(self):
        # Only meaningful when a request was passed to __init__ and the view
        # previously called request.session.set_test_cookie().
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError(self.error_messages['no_cookies'])
    def get_user_id(self):
        """Return the authenticated user's id, or None before/without clean()."""
        if self.user_cache:
            return self.user_cache.id
        return None
    def get_user(self):
        """Return the user cached by clean(), or None."""
        return self.user_cache
class RegistrationForm(CleanEmailMixin, forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given email and
    password.
    """
    # NOTE(review): "Плоьзователь" is a typo for "Пользователь" in the
    # user-facing duplicate-email message (runtime string, left untouched here).
    error_messages = {
        'duplicate_email': _("Плоьзователь с таким именем уже существует."),
        'password_mismatch': _("Пароли не совпадают."),
    }
    password1 = forms.CharField(label=_("Пароль"), widget=forms.PasswordInput)
    password2 = forms.CharField(
        label=_("Подтверждение пароля"), widget=forms.PasswordInput,
        help_text=_("Введите еще раз тот же пароль для проверки."))
    class Meta:
        model = get_user_model()
        # Only email comes from the model; the password pair is declared above.
        fields = ("email",)
    def clean_email(self):
        # Since User.username is unique, this check is redundant,
        # but it sets a nicer error message than the ORM. See #13147.
        email = super(RegistrationForm, self).clean_email()
        User = get_user_model()
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError(self.error_messages['duplicate_email'])
        return email
    def clean_password2(self):
        """Ensure both password entries agree; return the confirmed value."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'])
        return password2
    def save(self, commit=True):
        """Hash the chosen password before persisting the new user."""
        user = super(RegistrationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class AdminUserCreationForm(CleanEmailMixin, forms.ModelForm):
    """A form for creating new users. Includes all the required
    fields, plus a repeated password."""
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(
        label='Password confirmation', widget=forms.PasswordInput)
    class Meta:
        model = get_user_model()
        # BUG FIX: this was spelled ``excludes``, an attribute Django's
        # ModelForm Meta ignores — leaving the form without the required
        # ``fields``/``exclude`` declaration (an error on Django >= 1.8).
        exclude = ('residence', 'timezone', 'bio', 'is_staff', 'is_active')
    def clean_password2(self):
        """Check that the two password entries match; return the confirmation."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords don't match")
        return password2
    def save(self, commit=True):
        """Save the provided password in hashed form before persisting."""
        user = super(AdminUserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class AdminUserChangeForm(CleanEmailMixin, forms.ModelForm):
    """A form for updating users. Includes all the fields on
    the user, but replaces the password field with admin's
    password hash display field.
    """
    password = ReadOnlyPasswordHashField()
    class Meta:
        model = get_user_model()
        # BUG FIX: Django (>= 1.8) rejects a ModelForm whose Meta declares
        # neither ``fields`` nor ``exclude``. Exposing everything explicitly
        # matches the earlier implicit "all fields" behaviour.
        fields = '__all__'
    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
| [
"va.bolshakov@gmail.com"
] | va.bolshakov@gmail.com |
cd07a0839cf198525a33aed5909864ac6527e502 | 77b3ef4cae52a60181dfdf34ee594afc7a948925 | /mediation/dags/cm_sub_dag_parse_and_import_zte_3g.py | 5781cc0b7f0d683960266ee1c6de6a45da242481 | [
"Apache-2.0"
] | permissive | chandusekhar/bts-ce | 4cb6d1734efbda3503cb5fe75f0680c03e4cda15 | ad546dd06ca3c89d0c96ac8242302f4678ca3ee3 | refs/heads/master | 2021-07-15T02:44:27.646683 | 2020-07-26T08:32:33 | 2020-07-26T08:32:33 | 183,961,877 | 0 | 0 | Apache-2.0 | 2020-07-26T08:32:34 | 2019-04-28T21:42:29 | Python | UTF-8 | Python | false | false | 2,466 | py | import sys
import os
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
from cm_sub_dag_parse_huawei_2g_files import run_huawei_2g_parser
from cm_sub_dag_import_huawei_2g_files import import_huawei_2g_parsed_csv
sys.path.append('/mediation/packages');
from bts import NetworkBaseLine, Utils, ProcessCMData;
bts_utils = Utils();
def parse_and_import_zte_3g(parent_dag_name, child_dag_name, start_date, schedule_interval):
    """Build the sub-DAG that parses ZTE 3G (UMTS) bulk CM files and loads
    the resulting CSVs into the database.

    Pipeline (wired below via ``dag.set_dependency`` on task ids):
    check raw files exist -> backup previous CSVs -> parse -> clear tables
    -> import.
    """
    # NOTE(review): dag_id is computed but never used — the DAG() call below
    # rebuilds the same string inline.
    dag_id = '%s.%s' % (parent_dag_name, child_dag_name)
    dag = DAG(
        '%s.%s' % (parent_dag_name, child_dag_name),
        schedule_interval=schedule_interval,
        start_date=start_date,
    )
    # @TODO: Investigate other ways to check if there are not files yet
    # NOTE(review): this lists /raw/in while the parser below reads
    # /raw/bulkcm_umts — confirm which directory the check should target.
    t28 = BashOperator(
        task_id='check_if_zte_3g_raw_files_exist',
        bash_command='ls -1 /mediation/data/cm/zte/raw/in | wc -l',
        dag=dag)
    # @TODO: Backup parsed files
    t30 = BashOperator(
        task_id='backup_zte_3g_csv_files',
        bash_command='mv -f /mediation/data/cm/zte/3g/parsed/bulkcm_umts/* /mediation/data/cm/zte/parsed/backup/ 2>/dev/null || true',
        dag=dag)
    # NOTE(review): placeholder — named "2g" although this DAG handles 3G,
    # and it currently does nothing.
    def clear_zte_2g_cm_tables():
        pass
    t31 = PythonOperator(
        task_id='clear_zte_3g_cm_tables',
        python_callable=clear_zte_2g_cm_tables,
        dag=dag)
    # NOTE(review): variable says 2g but the task_id (used for wiring) says 3g.
    parse_zte_2g_cm_files = BashOperator(
        task_id='parse_zte_3g_cm_files',
        bash_command='java -jar /mediation/bin/boda-bulkcmparser.jar /mediation/data/cm/zte/raw/bulkcm_umts /mediation/data/cm/zte/parsed/bulkcm_umts /mediation/conf/cm/zte_cm_3g_blkcm_parser.cfg',
        dag=dag)
    import_zte_cm_data = BashOperator(
        task_id='import_zte_3g_cm_data',
        bash_command='python /mediation/bin/load_cm_data_into_db.py zte_bulkcm_umts /mediation/data/cm/zte/parsed/bulkcm_umts ',
        dag=dag)
    # Dependencies are declared by task_id strings, not the operator variables.
    dag.set_dependency('check_if_zte_3g_raw_files_exist', 'backup_zte_3g_csv_files')
    dag.set_dependency('backup_zte_3g_csv_files', 'parse_zte_3g_cm_files')
    dag.set_dependency('parse_zte_3g_cm_files', 'clear_zte_3g_cm_tables')
    dag.set_dependency('clear_zte_3g_cm_tables', 'import_zte_3g_cm_data')
return dag | [
"emmanuel.ssebaggala@bodastage.com"
] | emmanuel.ssebaggala@bodastage.com |
8e46ea58063b50c3a21f81a13f3a9e256b2011bc | 0e0ce88c886370df9af51855115c99dfc003e5da | /2019/01_Curso_Geek_basico_avancado/Exercicios_Python_Geek/exercicio_daniel/ex_2.py | fd492eecbc0a4528145e6fa1523d2b6de4c3795f | [] | no_license | miguelzeph/Python_Git | ed80db9a4f060836203df8cc2e42e003b0df6afd | 79d3b00236e7f4194d2a23fb016b43e9d09311e6 | refs/heads/master | 2021-07-08T18:43:45.855023 | 2021-04-01T14:12:23 | 2021-04-01T14:12:23 | 232,007,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | """2-) Teste se o ano é bissexto"""
ano = int(input("Digite o ano: "))
# BUG FIX: divisibility by 4 alone is wrong — Gregorian leap years exclude
# century years not divisible by 400 (1900 is not a leap year; 2000 is).
if ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0):
    print("É ano bissexto, tem 366 dias")
else:
print("Não é ano bissexto, tem 365 dias") | [
"miguel.junior.mat@hotmail.com"
] | miguel.junior.mat@hotmail.com |
3ae8c2d0c34a084d8aaad125c9ce618b4e15a444 | dbcd14a6a4e85f3e6a815c3fd05125ccb57d99b3 | /data_science/pandas_datacamp/manipulating_dataframes_with_pandas/03_rearranging_and_reshaping_data/05_stacking_and_unstacking_II.py | 2800c570456538d2908b651c37062cf9f126b606 | [] | no_license | blockchainassets/data-engineering | b1351b1321c612ba651f27230b506ebf73d949b8 | fc3136e89bc7defafb9e2fa6377217066f124fc7 | refs/heads/master | 2021-05-17T14:56:59.911700 | 2020-03-28T14:47:37 | 2020-03-28T14:47:37 | 250,831,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | '''
Stacking & unstacking II
You are now going to continue working with the users DataFrame. As always, first explore it in the IPython Shell to see the layout and note the index.
Your job in this exercise is to unstack and then stack the 'city' level, as you did previously for 'weekday'. Note that you won't get the same DataFrame.
Instructions
Define a DataFrame bycity with the 'city' level of users unstacked.
Print the bycity DataFrame to see the new data layout. This has been done for you.
Stack bycity by 'city' and print it to check if you get the same layout as the original users DataFrame.
'''
# NOTE(review): ``users`` is not defined in this snippet — per the docstring
# above, the DataCamp exercise environment preloads it as a MultiIndexed
# DataFrame.
# Unstack users by 'city': bycity
bycity = users.unstack(level='city')
# Print the bycity DataFrame
print(bycity)
# Stack bycity by 'city' and print it
print(bycity.stack(level='city'))
| [
"pierinaacam@gmail.com"
] | pierinaacam@gmail.com |
eb5c93e8330a4712426af4c4de2fcbb3b250f22e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03409/s717601792.py | 2e4854f89fc655c31e61797553566850201cbd4f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | n=int(input())
# (a[i], b[i]): coordinates of the i-th left-side point;
# (c[j], d[j]): coordinates of the j-th right-side point.
a=[0]*n
b=[0]*n
c=[0]*n
d=[0]*n
# edges[i]: indices j whose point strictly dominates point i (a[i]<c[j], b[i]<d[j]).
edges=[set() for _ in range(n)]
# matched[j]: index i currently paired with j, or -1 when j is free.
matched=[-1]*n
for i in range(n):
    ta,tb=list(map(int,input().split()))
    a[i]=ta
    b[i]=tb
for i in range(n):
    tc,td=list(map(int,input().split()))
    c[i]=tc
    d[i]=td
# Build the bipartite graph of all allowed pairings.
for i in range(n):
    for j in range(n):
        if a[i]<c[j] and b[i]<d[j]:
            edges[i].add(j)
def dfs(v, visited):
    """Augmenting-path search (Kuhn's matching algorithm) from left vertex v.

    Mutates the module-level ``matched`` table; returns True when v could be
    matched, possibly by re-matching a previously assigned right vertex.
    """
    for candidate in edges[v]:
        if candidate in visited:
            continue
        visited.add(candidate)
        owner = matched[candidate]
        if owner == -1 or dfs(owner, visited):
            matched[candidate] = v
            return True
    return False
print(sum(dfs(s,set()) for s in range(n))) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
38ff709a61cfb1ae8896dc1e3c08780ad1e9a2ef | 54df8336b50e8f2d7dbe353f0bc51a2b3489095f | /Python/Interview Que/Interview Companies/Helious/helious.py | 689ab9b6cbb7979e1b4729d382fd1fd9463b130b | [] | no_license | SurendraKumarAratikatla/MyLenovolapCodes1 | 42d5bb7a14bfdf8d773ee60719380ee28ff4947a | 12c56200fcfd3e5229bfeec209fd03b5fc35b823 | refs/heads/master | 2023-06-17T15:44:18.312398 | 2021-07-19T10:28:11 | 2021-07-19T10:28:11 | 387,358,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | import pandas as pd
def amazon(rows,column,grid):
    """Interview scratch code: slides a one-chunk window over ``grid``, then
    dumps an Excel sheet as a list of rows.

    NOTE(review): the windowing loop below clears ``li`` on every iteration
    and its result is never used afterwards, so only the Excel dump has any
    visible effect — confirm what this was meant to compute. ``column`` is
    also unused.
    """
    l = grid
    li = []
    j = 1
    for i in range(1,rows+1):
        #print(i)
        del li[:]
        li.append(l[j:i+1])
        j = j + 1
        #print(li)
        #print(len(li))
    # NOTE(review): pandas.read_excel has no ``index`` keyword (that belongs
    # to DataFrame.to_excel); on modern pandas this raises a TypeError.
    df = pd.read_excel('helious_excel.xlsx',index = False)
    products_list = df.values.tolist()
    print(products_list)
    #print(df[1:2])
amazon(5,4,df) | [
"suendra.aratikatla1608@gmail.com"
] | suendra.aratikatla1608@gmail.com |
65a307e046bd927498c11951ac68c7b6f1820282 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/sunData/SType/ST_facets/ST_facets00103m/ST_facets00103m1_p.py | 48419483f9e7d9f711bb0a9db68d1efb0f190a40 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 137 | py | from output.models.sun_data.stype.st_facets.st_facets00103m.st_facets00103m_xsd.st_facets00103m import Test
# Instance for the ST_facets00103m1 schema test; per the "_p" (positive case)
# file-naming convention, 99 is presumably a value that satisfies the
# generated Test type's facet constraints — confirm against the schema.
obj = Test(
    value=99
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
717a43259dc60384e77d65e86db860b4dc534928 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/assembler/AssemblyDualTextField.pyi | 2bb5f62b3cb243f9fb56b96919696eb29f21d33a | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,327 | pyi | from typing import List
import docking.widgets.autocomplete
import ghidra
import ghidra.app.plugin.core.assembler
import ghidra.program.model.lang
import ghidra.program.util
import java.awt
import java.awt.event
import java.lang
import javax.swing
class AssemblyDualTextField(object):
class AssemblyDualTextFieldDemo(object, ghidra.GhidraLaunchable):
ADDR_FORMAT: unicode
DEMO_LANG_ID: ghidra.program.model.lang.LanguageID
def __init__(self, __a0: ghidra.app.plugin.core.assembler.AssemblyDualTextField): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def launch(self, __a0: ghidra.GhidraApplicationLayout, __a1: List[unicode]) -> None: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
class AssemblyCompletion(object, java.lang.Comparable):
def __init__(self, __a0: unicode, __a1: unicode, __a2: java.awt.Color, __a3: int): ...
@overload
def compareTo(self, __a0: ghidra.app.plugin.core.assembler.AssemblyDualTextField.AssemblyCompletion) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getCanDefault(self) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColor(self) -> java.awt.Color: ...
def getDisplay(self) -> unicode: ...
def getText(self) -> unicode: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def canDefault(self) -> bool: ...
@property
def color(self) -> java.awt.Color: ...
@property
def display(self) -> unicode: ...
@property
def text(self) -> unicode: ...
class VisibilityMode(java.lang.Enum):
DUAL_VISIBLE: ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode = DUAL_VISIBLE
INVISIBLE: ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode = INVISIBLE
SINGLE_VISIBLE: ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode = SINGLE_VISIBLE
@overload
def compareTo(self, __a0: java.lang.Enum) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getDeclaringClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def name(self) -> unicode: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def ordinal(self) -> int: ...
def toString(self) -> unicode: ...
@overload
@staticmethod
def valueOf(__a0: unicode) -> ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode: ...
@overload
@staticmethod
def valueOf(__a0: java.lang.Class, __a1: unicode) -> java.lang.Enum: ...
@staticmethod
def values() -> List[ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode]: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
def __init__(self): ...
def addFocusListener(self, __a0: java.awt.event.FocusListener) -> None: ...
def addKeyListener(self, __a0: java.awt.event.KeyListener) -> None: ...
def clear(self) -> None: ...
def equals(self, __a0: object) -> bool: ...
def getAssemblyField(self) -> javax.swing.JTextField: ...
def getAutocompleter(self) -> docking.widgets.autocomplete.TextFieldAutocompleter: ...
def getClass(self) -> java.lang.Class: ...
def getMnemonicField(self) -> javax.swing.JTextField: ...
def getOperandsField(self) -> javax.swing.JTextField: ...
def getText(self) -> unicode: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def setCaretPosition(self, __a0: int) -> None: ...
def setLanguageLocation(self, __a0: ghidra.program.model.lang.Language, __a1: long) -> None: ...
def setProgramLocation(self, __a0: ghidra.program.util.ProgramLocation) -> None: ...
def setText(self, __a0: unicode) -> None: ...
def setVisible(self, __a0: ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def assemblyField(self) -> javax.swing.JTextField: ...
@property
def autocompleter(self) -> docking.widgets.autocomplete.TextFieldAutocompleter: ...
@property
def caretPosition(self) -> None: ... # No getter available.
@caretPosition.setter
def caretPosition(self, value: int) -> None: ...
@property
def mnemonicField(self) -> javax.swing.JTextField: ...
@property
def operandsField(self) -> javax.swing.JTextField: ...
@property
def programLocation(self) -> None: ... # No getter available.
@programLocation.setter
def programLocation(self, value: ghidra.program.util.ProgramLocation) -> None: ...
@property
def text(self) -> unicode: ...
@text.setter
def text(self, value: unicode) -> None: ...
@property
def visible(self) -> None: ... # No getter available.
@visible.setter
def visible(self, value: ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
5d9be8cbd2400d26b6973bf47290a761629d2234 | f50f1aa1f8f139d546db3230a1cb1f53043fd9e6 | /programming/language/python/xmpppy/actions.py | c7191c67dea317bccb65cc5fb9ea28569e810d0e | [] | no_license | pars-linux/corporate2 | 7887961d1552d39bc3b0bef4a60fd3413d9b82bb | 14d1eacfc824fb8d0bff8173e7ac06b36b88d10d | refs/heads/master | 2020-05-26T15:02:12.005654 | 2017-02-27T03:07:14 | 2017-02-27T03:07:14 | 82,476,084 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "%s-%s" % (get.srcNAME(), get.srcVERSION().replace("_", ""))
def install():
pythonmodules.install()
pisitools.insinto("%s/%s" % (get.docDIR(), get.srcNAME()), "doc/*")
pisitools.dodoc("ChangeLog")
| [
"ozancaglayan@users.noreply.github.com"
] | ozancaglayan@users.noreply.github.com |
fe45965db65727374e8c7858346a6f1b042d6ccb | aa0270b351402e421631ebc8b51e528448302fab | /sdk/containerregistry/azure-containerregistry/azure/containerregistry/_version.py | e58e03dcf5cb74c0037ab2a31a9b3476cbb2efe7 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 172 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
VERSION = "1.1.0b5"
| [
"noreply@github.com"
] | fangchen0601.noreply@github.com |
9c45ea43930611d5d6687bc81905d6bd6dc7511d | 32dbb74f03c7450ee1f3166f82260e60272f57e0 | /Push/special_mixer_component.py | 9867a321f3da17667503d3af980a96407815a7f9 | [] | no_license | cce/buttons10 | 61555bc767f2bd300bfffb373f9feaae96b83ca7 | 6f1137c96eead0b9771ad8ec9327dd72ada2e916 | refs/heads/master | 2021-04-15T09:45:39.684764 | 2018-03-24T04:29:52 | 2018-03-24T04:29:52 | 126,565,725 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,946 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Push/special_mixer_component.py
from __future__ import absolute_import, print_function, unicode_literals
from itertools import izip_longest
from ableton.v2.base import listens
from ableton.v2.control_surface import components
from ableton.v2.control_surface.elements import DisplayDataSource
from .special_chan_strip_component import SpecialChanStripComponent
class SpecialMixerComponent(components.MixerComponent):
u"""
Special mixer class that uses return tracks alongside midi and
audio tracks. This provides also a more convenient interface to
set controls for the different modes of Push.
"""
num_label_segments = 4
def __init__(self, *a, **k):
super(SpecialMixerComponent, self).__init__(*a, **k)
self._pan_send_index = 0
self._pan_send_controls = None
self._pan_send_names_display = None
self._pan_send_values_display = None
self._pan_send_graphics_display = None
self._pan_send_toggle_skip = False
self._selected_track_data_sources = map(DisplayDataSource, (u'',) * self.num_label_segments)
self._selected_track_data_sources[0].set_display_string(u'Track Selection:')
self._selected_track_name_data_source = self._selected_track_data_sources[1]
self._on_selected_track_changed.subject = self.song.view
self._on_track_list_changed.subject = self.song
self._update_selected_track_name()
return
def _create_strip(self):
return SpecialChanStripComponent()
def set_pan_send_toggle(self, toggle):
u"""
The pan_send_toggle cycles through the different pan, or send
modes changing the bejhaviour of the pan_send display and
controls.
"""
self._pan_send_toggle = toggle
self._on_pan_send_value.subject = toggle
self._pan_send_toggle_skip = True
def set_selected_track_name_display(self, display):
if display:
display.set_data_sources(self._selected_track_data_sources)
def set_track_select_buttons(self, buttons):
for strip, button in izip_longest(self._channel_strips, buttons or []):
if button:
button.set_on_off_values(u'Option.Selected', u'Option.Unselected')
strip.set_select_button(button)
def set_solo_buttons(self, buttons):
for strip, button in izip_longest(self._channel_strips, buttons or []):
if button:
button.set_on_off_values(u'Mixer.SoloOn', u'Mixer.SoloOff')
strip.set_solo_button(button)
def set_mute_buttons(self, buttons):
for strip, button in izip_longest(self._channel_strips, buttons or []):
if button:
button.set_on_off_values(u'Mixer.MuteOff', u'Mixer.MuteOn')
strip.set_mute_button(button)
def set_track_names_display(self, display):
if display:
sources = [ strip.track_name_data_source() for strip in self._channel_strips ]
display.set_data_sources(sources)
def set_volume_names_display(self, display):
self._set_parameter_names_display(display, 0)
def set_volume_values_display(self, display):
self._set_parameter_values_display(display, 0)
def set_volume_graphics_display(self, display):
self._set_parameter_graphics_display(display, 0)
def set_volume_controls(self, controls):
for strip, control in izip_longest(self._channel_strips, controls or []):
strip.set_volume_control(control)
def set_pan_send_names_display(self, display):
self._normalize_pan_send_index()
self._pan_send_names_display = display
self._set_parameter_names_display(display, self._pan_send_index + 1)
def set_pan_send_values_display(self, display):
self._normalize_pan_send_index()
self._pan_send_values_display = display
self._set_parameter_values_display(display, self._pan_send_index + 1)
def set_pan_send_graphics_display(self, display):
self._normalize_pan_send_index()
self._pan_send_graphics_display = display
self._set_parameter_graphics_display(display, self._pan_send_index + 1)
def set_pan_send_controls(self, controls):
self.set_send_controls(None)
self.set_pan_controls(None)
self._pan_send_controls = controls
self._normalize_pan_send_index()
if self._pan_send_index == 0:
self.set_pan_controls(controls)
else:
sends = self._pan_send_index - 1
self.set_send_controls(map(lambda ctl: (None,) * sends + (ctl,), controls or []))
return
@listens(u'visible_tracks')
def _on_track_list_changed(self):
self._update_pan_sends()
def set_pan_controls(self, controls):
for strip, control in izip_longest(self._channel_strips, controls or []):
strip.set_pan_control(control)
def set_send_controls(self, controls):
for strip, control in izip_longest(self._channel_strips, controls or []):
strip.set_send_controls(control)
def _set_parameter_names_display(self, display, parameter):
if display:
sources = [ strip.track_parameter_name_sources(parameter) for strip in self._channel_strips ]
display.set_data_sources(sources)
def _set_parameter_values_display(self, display, parameter):
if display:
sources = [ strip.track_parameter_data_sources(parameter) for strip in self._channel_strips ]
display.set_data_sources(sources)
def _set_parameter_graphics_display(self, display, parameter):
if display:
sources = [ strip.track_parameter_graphic_sources(parameter) for strip in self._channel_strips ]
display.set_data_sources(sources)
@listens(u'value')
def _on_pan_send_value(self, value):
if not self._pan_send_toggle_skip and self.is_enabled() and (value or not self._pan_send_toggle.is_momentary()):
self._pan_send_index += 1
self._update_pan_sends()
self._pan_send_toggle_skip = False
def _update_pan_sends(self):
self.set_pan_send_controls(self._pan_send_controls)
self.set_pan_send_names_display(self._pan_send_names_display)
self.set_pan_send_graphics_display(self._pan_send_graphics_display)
def _normalize_pan_send_index(self):
if len(self.song.tracks) == 0 or self._pan_send_index > len(self.song.tracks[0].mixer_device.sends):
self._pan_send_index = 0
@listens(u'selected_track.name')
def _on_selected_track_changed(self):
self._update_selected_track_name()
def _update_selected_track_name(self):
selected = self.song.view.selected_track
self._selected_track_name_data_source.set_display_string(selected.name) | [
"cce@appneta.com"
] | cce@appneta.com |
d5e78a332dff2134a46453a63b84a1c416791fe0 | 7cfcb2a79226d8fe90276bd32964d94243cc496a | /joints_detectors/hrnet/pose_estimation/video.py | 5271ff11f7860bd444e5f2f422c916880f5c3343 | [
"MIT"
] | permissive | daydreamer2023/videopose | 5185442eb31138f6bd6a86fdbec2c411a8773bc3 | 463f5d2770288a217033e901d1a8251b489d7f76 | refs/heads/master | 2022-01-12T16:14:33.589459 | 2019-05-13T12:45:23 | 2019-05-13T12:45:23 | 186,530,514 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,104 | py | '''
使用yolov3作为pose net模型的前处理
use yolov3 as the 2d human bbox detector
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
path1 = os.path.split(os.path.realpath(__file__))[0]
path2 = os.path.join(path1, '..')
sys.path.insert(0, path1)
sys.path.insert(0, path2)
import argparse
import pprint
import ipdb;pdb=ipdb.set_trace
import numpy as np
from tqdm import tqdm
from utilitys import plot_keypoint, PreProcess
import time
import torch
import _init_paths
from config import cfg
import config
from config import update_config
from utils.transforms import *
from lib.core.inference import get_final_preds
import cv2
import models
from lib.detector.yolo.human_detector import main as yolo_det
from scipy.signal import savgol_filter
from lib.detector.yolo.human_detector import load_model as yolo_model
sys.path.pop(0)
sys.path.pop(1)
sys.path.pop(2)
kpt_queue = []
def smooth_filter(kpts):
if len(kpt_queue) < 6:
kpt_queue.append(kpts)
return kpts
queue_length = len(kpt_queue)
if queue_length == 50:
kpt_queue.pop(0)
kpt_queue.append(kpts)
# transpose to shape (17, 2, num, 50) 关节点keypoints num、横纵坐标、每帧人数、帧数
transKpts = np.array(kpt_queue).transpose(1,2,3,0)
window_length = queue_length - 1 if queue_length % 2 == 0 else queue_length - 2
# array, window_length(bigger is better), polyorder
result = savgol_filter(transKpts, window_length, 3).transpose(3, 0, 1, 2) #shape(frame_num, human_num, 17, 2)
# 返回倒数第几帧 return third from last frame
return result[-3]
class get_args():
# hrnet config
cfg = path2 + '/experiments/coco/hrnet/w32_256x192_adam_lr1e-3.yaml'
dataDir=''
logDir=''
modelDir=''
opts=[]
prevModelDir=''
##### load model
def model_load(config):
model = eval('models.'+config.MODEL.NAME+'.get_pose_net')(
config, is_train=False
)
model_file_name = path2 + '/models/pytorch/pose_coco/pose_hrnet_w32_256x192.pth'
state_dict = torch.load(model_file_name)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k # remove module.
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.eval()
return model
def ckpt_time(t0=None, display=None):
if not t0:
return time.time()
else:
t1 = time.time()
if display:
print('consume {:2f} second'.format(t1-t0))
return t1-t0, t1
###### LOAD human detecotor model
human_model = yolo_model()
def generate_kpts(video_name):
args = get_args()
update_config(cfg, args)
cam = cv2.VideoCapture(video_name)
video_length = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
ret_val, input_image = cam.read()
# Video writer
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
input_fps = cam.get(cv2.CAP_PROP_FPS)
#### load pose-hrnet MODEL
pose_model = model_load(cfg)
pose_model.cuda()
# collect keypoints coordinate
kpts_result = []
for i in tqdm(range(video_length-1)):
ret_val, input_image = cam.read()
try:
bboxs, scores = yolo_det(input_image, human_model)
# bbox is coordinate location
inputs, origin_img, center, scale = PreProcess(input_image, bboxs, scores, cfg)
except Exception as e:
print(e)
continue
with torch.no_grad():
# compute output heatmap
inputs = inputs[:,[2,1,0]]
output = pose_model(inputs.cuda())
# compute coordinate
preds, maxvals = get_final_preds(
cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))
# smooth and fine-tune coordinates
preds = smooth_filter(preds)
# 3D video pose (only support single human)
kpts_result.append(preds[0])
result = np.array(kpts_result)
return result
if __name__ == '__main__':
main()
| [
"lxy5513@gmail.com"
] | lxy5513@gmail.com |
e6365024ee3cd8e06acfeef8570a4e5969727fbd | ca17bd80ac1d02c711423ac4093330172002a513 | /remove_invalid_parenthese/RemoveInvalidParenthese.py | 6228196ed2a67837b1c30815b6bd3b8b6f1be798 | [] | no_license | Omega094/lc_practice | 64046dea8bbdaee99d767b70002a2b5b56313112 | e61776bcfd5d93c663b247d71e00f1b298683714 | refs/heads/master | 2020-03-12T13:45:13.988645 | 2018-04-23T06:28:32 | 2018-04-23T06:28:32 | 130,649,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | class Solution(object):
def isvalid(self,s):
ctr = 0
for c in s:
if c == '(':
ctr += 1
elif c == ')':
ctr -= 1
if ctr < 0:
return False
return ctr == 0
def removeInvalidParentheses(self, s):
level = {s}
while True:
valid = filter(self.isvalid, level)
if valid:
return valid
level ={s[:i] + s[i+1:] for s in level for i in range(len(s))}
return []
#test
if __name__ == "__main__":
sol = Solution()
print sol.removeInvalidParentheses("()())()")
print sol.removeInvalidParentheses("(a)())()")
| [
"zhao_j1@denison.edu"
] | zhao_j1@denison.edu |
d0521ace1b2b1d05327842085cb9e3c88b7ff56e | 6532b2c6dfefa27a7f5c3c790f13dfc1e42cc703 | /mlbstats/wsgi.py | b178aa582e80746b55bd0bee8b2755a5fc27ee86 | [] | no_license | aclark4life/mlbstats | c59ba44b17541364fc4da16dbd3383f7ea05497f | b939f32dadeed6642b23de3e57624c0b0661b87c | refs/heads/master | 2023-04-29T07:15:36.471625 | 2021-05-18T14:35:12 | 2021-05-18T14:35:12 | 348,089,359 | 0 | 0 | null | 2021-05-18T14:35:12 | 2021-03-15T18:57:54 | Python | UTF-8 | Python | false | false | 397 | py | """
WSGI config for mlbstats project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mlbstats.settings.dev")
application = get_wsgi_application()
| [
"aclark@aclark.net"
] | aclark@aclark.net |
f3c73006fa5e1104d3838c08a0ff4d64b2ed7f7f | fcd00440495737c72f2ec0183e78002a6c9f0afe | /sample_plot_script_2.py | b51aa77d168c9ec09d9eb0a026c13281b5054960 | [] | no_license | Riashat/Plotting_Scripts | caa7ba20fbaa81e6f415c8311ec3d481f45e72c1 | 1b0b6d61aac0f3dbc0c6af207ce351d45e88411f | refs/heads/master | 2021-01-23T18:22:04.741566 | 2018-05-08T16:43:25 | 2018-05-08T16:43:25 | 102,787,346 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,040 | py | import matplotlib.pyplot as plt
import time
import numpy as np
import pandas as pd
from numpy import genfromtxt
import pdb
from scipy import stats
eps = np.arange(1000)
#HalfCheetah Policy Activations
hs_leaky_relu = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/HalfCheetah_Policy_Act_Leaky_Relu_all_exp_rewards.npy')
hs_relu = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/HalfCheetah_Policy_Act_Relu_all_exp_rewards.npy')
hs_tanh = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/HalfCheetah_Policy_Act_TanH_all_exp_rewards.npy')
mean_hs_leaky = np.mean(hs_leaky_relu, axis=1)
mean_hs_relu = np.mean(hs_relu, axis=1)
mean_hs_tanh = np.mean(hs_tanh, axis=1)
std_hs_leaky = np.std(hs_leaky_relu, axis=1)
std_hs_relu = np.std(hs_relu, axis=1)
std_hs_tanh = np.std(hs_tanh, axis=1)
last_hs_leaky = mean_hs_leaky[-1]
last_error_hs_leaky = stats.sem(hs_leaky_relu[-1, :], axis=None, ddof=0)
print ("last_hs_leaky", last_hs_leaky)
print ("last_error_hs_leaky", last_error_hs_leaky)
last_hs_relu = mean_hs_relu[-1]
last_error_hs_relu = stats.sem(hs_relu[-1, :], axis=None, ddof=0)
print ("last_hs_relu", last_hs_relu)
print ("last_error_hs_relu", last_error_hs_relu)
last_hs_tanh = mean_hs_tanh[-1]
last_error_hs_tanh = stats.sem(hs_tanh[-1, :], axis=None, ddof=0)
print ("last_hs_tanh", last_hs_tanh)
print ("last_error_hs_tanh", last_error_hs_tanh)
#Hopper Policy Activations
ho_leaky_relu = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/Hopper_Policy_Activation_Leaky_Relu_all_exp_rewards.npy')
ho_relu = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/Hopper_Policy_Activation_Relu_all_exp_rewards.npy')
ho_tanh = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/Hopper_Policy_Activation_TanH_all_exp_rewards.npy')
mean_ho_leaky = np.mean(ho_leaky_relu, axis=1)
mean_ho_relu = np.mean(ho_relu, axis=1)
mean_ho_tanh = np.mean(ho_tanh, axis=1)
std_ho_leaky = np.std(ho_leaky_relu, axis=1)
std_ho_relu = np.std(ho_relu, axis=1)
std_ho_tanh = np.std(ho_tanh, axis=1)
last_ho_leaky = mean_ho_leaky[-1]
last_error_ho_leaky = stats.sem(ho_leaky_relu[-1, :], axis=None, ddof=0)
print ("last_ho_leaky", last_ho_leaky)
print ("last_error_ho_leaky", last_error_ho_leaky)
last_ho_relu = mean_ho_relu[-1]
last_error_ho_relu = stats.sem(ho_relu[-1, :], axis=None, ddof=0)
print ("last_ho_relu", last_ho_relu)
print ("last_error_ho_relu", last_error_ho_relu)
last_ho_tanh = mean_ho_tanh[-1]
last_error_ho_tanh = stats.sem(ho_tanh[-1, :], axis=None, ddof=0)
print ("last_ho_tanh", last_ho_tanh)
print ("last_error_ho_tanh", last_error_ho_tanh)
def multiple_plot(average_vals_list, std_dev_list, traj_list, other_labels, env_name, smoothing_window=5, no_show=False, ignore_std=False, limit=None, extra_lines=None):
# average_vals_list - list of numpy averages
# std_dev list - standard deviation or error
# traj_list - list of timestep (x-axis) quantities
# other_labels - the labels for the lines
# Env-name the header
# smoothing window how much to smooth using a running average.
fig = plt.figure(figsize=(16, 8))
# fig = plt.figure(figsize=(15, 10))
colors = ["#1f77b4", "#ff7f0e", "#d62728", "#9467bd", "#2ca02c", "#8c564b", "#e377c2", "#bcbd22", "#17becf"]
color_index = 0
ax = plt.subplot() # Defines ax variable by creating an empty plot
offset = 1
# Set the tick labels font
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Arial')
label.set_fontsize(22)
if traj_list is None:
traj_list = [None]*len(average_vals_list)
index = 0
for average_vals, std_dev, label, trajs in zip(average_vals_list, std_dev_list, other_labels[:len(average_vals_list)], traj_list):
index += 1
rewards_smoothed_1 = pd.Series(average_vals).rolling(smoothing_window, min_periods=smoothing_window).mean()[:limit]
if limit is None:
limit = len(rewards_smoothed_1)
rewards_smoothed_1 = rewards_smoothed_1[:limit]
std_dev = std_dev[:limit]
if trajs is None:
trajs = list(range(len(rewards_smoothed_1)))
else:
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.xaxis.get_offset_text().set_fontsize(20)
fill_color = colors[color_index]#choice(colors, 1)
color_index += 1
cum_rwd_1, = plt.plot(trajs, rewards_smoothed_1, label=label, color=fill_color)
offset += 3
if not ignore_std:
#plt.errorbar(trajs[::25 + offset], rewards_smoothed_1[::25 + offset], yerr=std_dev[::25 + offset], linestyle='None', color=fill_color, capsize=5)
plt.fill_between(trajs, rewards_smoothed_1 + std_dev, rewards_smoothed_1 - std_dev, alpha=0.3, edgecolor=fill_color, facecolor=fill_color)
if extra_lines:
for lin in extra_lines:
plt.plot(trajs, np.repeat(lin, len(rewards_smoothed_1)), linestyle='-.', color = colors[color_index], linewidth=2.5, label=other_labels[index])
color_index += 1
index += 1
axis_font = {'fontname':'Arial', 'size':'28'}
plt.legend(loc='lower right', prop={'size' : 16})
plt.xlabel("Iterations", **axis_font)
if traj_list:
plt.xlabel("Timesteps", **axis_font)
else:
plt.xlabel("Iterations", **axis_font)
plt.ylabel("Average Return", **axis_font)
plt.title("%s"% env_name, **axis_font)
if no_show:
fig.savefig('%s.png' % env_name, dpi=fig.dpi)
else:
plt.show()
return fig
def get_plot(stats1, stats2, stats3, smoothing_window=5, noshow=False):
## Figure 1
fig = plt.figure(figsize=(70, 40))
rewards_smoothed_1 = pd.Series(stats1).rolling(smoothing_window, min_periods=smoothing_window).mean()
rewards_smoothed_2 = pd.Series(stats2).rolling(smoothing_window, min_periods=smoothing_window).mean()
rewards_smoothed_3 = pd.Series(stats3).rolling(smoothing_window, min_periods=smoothing_window).mean()
cum_rwd_1, = plt.plot(eps, rewards_smoothed_1, color = "red", linewidth=2.5, label="Policy Network Activation = ReLU")
plt.fill_between( eps, rewards_smoothed_1 + std_hs_relu, rewards_smoothed_1 - std_hs_relu, alpha=0.2, edgecolor='red', facecolor='red')
cum_rwd_2, = plt.plot(eps, rewards_smoothed_2, color = "blue", linewidth=2.5, label="Policy Network Activation = TanH" )
plt.fill_between( eps, rewards_smoothed_2 + std_hs_tanh, rewards_smoothed_2 - std_hs_tanh, alpha=0.2, edgecolor='blue', facecolor='blue')
cum_rwd_3, = plt.plot(eps, rewards_smoothed_3, color = "black", linewidth=2.5, label="Policy Network Activation = Leaky ReLU" )
plt.fill_between( eps, rewards_smoothed_3 + std_hs_leaky, rewards_smoothed_3 - std_hs_leaky, alpha=0.2, edgecolor='black', facecolor='black')
plt.legend(handles=[cum_rwd_1, cum_rwd_2, cum_rwd_3], fontsize=22)
plt.xlabel("Number of Iterations",fontsize=26)
plt.ylabel("Average Returns", fontsize=26)
plt.title("DDPG with HalfCheetah Environment - Actor Network Activations", fontsize=30)
plt.show()
fig.savefig('ddpg_halfcheetah_policy_activations.png')
return fig
def main():
timesteps_per_epoch = 2000
max_timesteps = 2e6
plot_multiple(
[mean_ho_relu, mean_ho_tanh, mean_ho_leaky],
[std_ho_relu, std_ho_tanh, std_ho_leaky],
[range(0, max_timesteps, timesteps_per_epoch)]*3,
["relu", "tanh", "leaky_relu"],
"HalfCheetah-v1 (DDPG, Policy Network Activation)")
if __name__ == '__main__':
main() | [
"riashat.islam.93@gmail.com"
] | riashat.islam.93@gmail.com |
7c282c200459bd553ba96067cc096214498e9fc3 | a2080cbcf9694ad03690769cfc64d85a57f1d9d5 | /tests/type/test_directives.py | 83147cd74eda1f3a3c5616fe6181eed9445f576d | [
"MIT"
] | permissive | wuyuanyi135/graphql-core | 84196a47aec0f9508db3f8aadb8951b9fc9b9fe0 | 169ae7bced0f515603e97f1def925f3d062e5009 | refs/heads/main | 2023-04-13T11:38:10.815573 | 2021-05-02T05:17:29 | 2021-05-02T05:21:58 | 363,327,364 | 1 | 0 | MIT | 2021-05-01T05:05:29 | 2021-05-01T05:05:28 | null | UTF-8 | Python | false | false | 8,101 | py | from pytest import raises # type: ignore
from graphql.language import DirectiveLocation, DirectiveDefinitionNode, Node
from graphql.type import GraphQLArgument, GraphQLDirective, GraphQLInt, GraphQLString
def describe_type_system_directive():
def can_create_instance():
arg = GraphQLArgument(GraphQLString, description="arg description")
node = DirectiveDefinitionNode()
locations = [DirectiveLocation.SCHEMA, DirectiveLocation.OBJECT]
directive = GraphQLDirective(
name="test",
locations=[DirectiveLocation.SCHEMA, DirectiveLocation.OBJECT],
args={"arg": arg},
description="test description",
is_repeatable=True,
ast_node=node,
)
assert directive.name == "test"
assert directive.locations == locations
assert directive.args == {"arg": arg}
assert directive.is_repeatable is True
assert directive.description == "test description"
assert directive.extensions is None
assert directive.ast_node is node
def defines_a_directive_with_no_args():
locations = [DirectiveLocation.QUERY]
directive = GraphQLDirective("Foo", locations=locations)
assert directive.name == "Foo"
assert directive.args == {}
assert directive.is_repeatable is False
assert directive.extensions is None
assert directive.locations == locations
def defines_a_directive_with_multiple_args():
args = {
"foo": GraphQLArgument(GraphQLString),
"bar": GraphQLArgument(GraphQLInt),
}
locations = [DirectiveLocation.QUERY]
directive = GraphQLDirective("Foo", locations=locations, args=args)
assert directive.name == "Foo"
assert directive.args == args
assert directive.is_repeatable is False
assert directive.locations == locations
def defines_a_repeatable_directive():
locations = [DirectiveLocation.QUERY]
directive = GraphQLDirective("Foo", is_repeatable=True, locations=locations)
assert directive.name == "Foo"
assert directive.args == {}
assert directive.is_repeatable is True
assert directive.locations == locations
def directive_accepts_input_types_as_arguments():
# noinspection PyTypeChecker
directive = GraphQLDirective(
name="Foo", locations=[], args={"arg": GraphQLString} # type: ignore
)
arg = directive.args["arg"]
assert isinstance(arg, GraphQLArgument)
assert arg.type is GraphQLString
def directive_accepts_strings_as_locations():
# noinspection PyTypeChecker
directive = GraphQLDirective(
name="Foo", locations=["SCHEMA", "OBJECT"] # type: ignore
)
assert directive.locations == [
DirectiveLocation.SCHEMA,
DirectiveLocation.OBJECT,
]
def directive_has_str():
directive = GraphQLDirective("foo", [])
assert str(directive) == "@foo"
def directive_has_repr():
directive = GraphQLDirective("foo", [])
assert repr(directive) == "<GraphQLDirective(@foo)>"
def can_compare_with_other_source_directive():
locations = [DirectiveLocation.QUERY]
directive = GraphQLDirective("Foo", locations)
assert directive == directive
assert not directive != directive
assert not directive == {}
assert directive != {}
same_directive = GraphQLDirective("Foo", locations)
assert directive == same_directive
assert not directive != same_directive
other_directive = GraphQLDirective("Bar", locations)
assert not directive == other_directive
assert directive != other_directive
other_locations = [DirectiveLocation.MUTATION]
other_directive = GraphQLDirective("Foo", other_locations)
assert not directive == other_directive
assert directive != other_directive
other_directive = GraphQLDirective("Foo", locations, is_repeatable=True)
assert not directive == other_directive
assert directive != other_directive
other_directive = GraphQLDirective("Foo", locations, description="other")
assert not directive == other_directive
assert directive != other_directive
def rejects_an_unnamed_directive():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective(None, locations=[]) # type: ignore
assert str(exc_info.value) == "Directive must be named."
def rejects_a_directive_with_incorrectly_typed_name():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective({"bad": True}, locations=[]) # type: ignore
assert str(exc_info.value) == "The directive name must be a string."
def rejects_a_directive_with_incorrectly_typed_args():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations=[], args=["arg"]) # type: ignore
assert str(exc_info.value) == (
"Foo args must be a dict with argument names as keys."
)
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective(
"Foo",
locations=[],
args={1: GraphQLArgument(GraphQLString)}, # type: ignore
)
assert str(exc_info.value) == (
"Foo args must be a dict with argument names as keys."
)
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective(
"Foo",
locations=[],
args={"arg": GraphQLDirective("Bar", [])}, # type: ignore
)
assert str(exc_info.value) == (
"Foo args must be GraphQLArgument or input type objects."
)
def rejects_a_directive_with_incorrectly_typed_repeatable_flag():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations=[], is_repeatable=None) # type: ignore
assert str(exc_info.value) == "Foo is_repeatable flag must be True or False."
def rejects_a_directive_with_undefined_locations():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations=None) # type: ignore
assert str(exc_info.value) == (
"Foo locations must be specified"
" as a collection of DirectiveLocation enum values."
)
def rejects_a_directive_with_incorrectly_typed_locations():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations="bad") # type: ignore
assert (
str(exc_info.value) == "Foo locations must be specified"
" as a collection of DirectiveLocation enum values."
)
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations=["bad"]) # type: ignore
assert str(exc_info.value) == (
"Foo locations must be specified"
" as a collection of DirectiveLocation enum values."
)
def rejects_a_directive_with_incorrectly_typed_description():
    """A non-string description must raise a descriptive TypeError."""
    with raises(TypeError) as exc_info:
        # noinspection PyTypeChecker
        GraphQLDirective(
            "Foo", locations=[], description={"bad": True}  # type: ignore
        )
    message = str(exc_info.value)
    assert message == "Foo description must be a string."
def rejects_a_directive_with_incorrectly_typed_ast_node():
    """ast_node must be a DirectiveDefinitionNode, not a generic Node."""
    with raises(TypeError) as exc_info:
        # noinspection PyTypeChecker
        GraphQLDirective("Foo", locations=[], ast_node=Node())  # type: ignore
    message = str(exc_info.value)
    assert message == "Foo AST node must be a DirectiveDefinitionNode."
| [
"cito@online.de"
] | cito@online.de |
7a197fe4699e4b70d97f31eb1a9343d397616030 | 53e2aabd85f3154f5c3c79d26fadf094ff694d92 | /Etl.Highlight/test.py | f2b6790f36ab963b6eafc13ac3ecccbdba9a427c | [] | no_license | yuchanmo/Upbit | 69446b08eb86692df5b2c68886d71310d7e226f2 | d7106579d644286b1305c0de370501821d6f499c | refs/heads/master | 2023-08-18T01:56:45.210567 | 2021-09-28T12:15:43 | 2021-09-28T12:15:43 | 411,268,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,961 | py | from dbconnector import sqlserver
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import datetime as dt
import numpy as np
import requests
from sklearn.preprocessing import MinMaxScaler
# Pull one market's tick rows for a fixed time window; 'sqlserver' is the
# shared DB connection imported from dbconnector.
df = pd.read_sql("SELECT * FROM [CoinStar].[dbo].[MarketPrice] where market = 'KRW-OMG' and regdate between '2021-05-01 10:29' and '2021-05-01 10:42:05'",sqlserver)
df.set_index('regdate',inplace=True)
# NOTE(review): bare expression - evaluates the column and discards it (no-op).
df['trade_price']
df = df.sort_index()
print(df['market'].unique())
# Per-row deltas of the cumulative trade price/volume columns.
df[['cur_acc_trade_price','cur_trade_volume']] = df[['acc_trade_price','acc_trade_volume']].diff().apply(pd.Series)
# NOTE(review): this is a slice of df; the column assignment below triggers
# pandas' SettingWithCopyWarning - consider .copy() here. TODO confirm intent.
trade_number_df = df[['cur_acc_trade_price','cur_trade_volume','trade_price']]
# Relative (fractional) change of each column versus the previous row.
trade_number_df[['coef_cur_acc_trade_price','coef_cur_trade_volume','coef_trade_price']] = ((trade_number_df - trade_number_df.shift(1))/trade_number_df.shift(1)).apply(pd.Series)
# Last row = most recent tick in the window.
cur_trade_number = trade_number_df.iloc[-1]
# Rolling moving averages of the trade price over 5/10/20 rows.
df['avg_5'] = df['trade_price'].rolling(window=5,min_periods=1).mean()
df['avg_10'] = df['trade_price'].rolling(window=10,min_periods=1).mean()
df['avg_20'] = df['trade_price'].rolling(window=20,min_periods=1).mean()
df[['trade_price','avg_5','avg_10','avg_20']].plot()
plt.show()
# Fit a line through the min-max scaled moving averages of the last 5 rows
# to estimate each average's slope.
tail_df = df.tail(5)
y = tail_df[['avg_5','avg_10','avg_20']].values
y= MinMaxScaler().fit(y).transform(y)
# NOTE(review): bare expression - result is discarded (no-op).
np.linspace(0,1,5)
x = np.linspace(0,1,5).reshape(-1,1)
coefs = LinearRegression().fit(x,y).coef_
last_row = df.iloc[-1]
cur_trade_number['avg_5'] = last_row['avg_5']
cur_trade_number['avg_10'] = last_row['avg_10']
cur_trade_number['avg_20'] = last_row['avg_20']
cur_trade_number['avg_5_coef'] = coefs[0][0]
cur_trade_number['avg_10_coef'] = coefs[1][0]
cur_trade_number['avg_20_coef'] = coefs[2][0]
res_df = cur_trade_number.to_frame().T
# True when every moving-average slope is positive.
res_df['posi_coef'] = np.all(coefs>0)
# True when short MA >= mid MA >= long MA on the latest row.
res_df['correct_order_avg'] = last_row['avg_5'] >= last_row['avg_10'] >= last_row['avg_20']
res_df['market'] = last_row['market']
# NOTE(review): reset_index() returns a new frame that is discarded here.
res_df.reset_index()
res_df.iloc[0] | [
"mojjijji@gmail.com"
] | mojjijji@gmail.com |
fe0eb734470c894706cf2b66195c37013924e275 | 49b33bd602e9e003c4017eb1d9852dec31a34cad | /wowza_ec2_bootstrapper/actions/set_config.py | 0a76884b6618e83ae8709df4c9ef0a0e9c433cec | [] | no_license | nocarryr/wowza-ec2-bootstrapper | 4dba7180c594a708edd6502c5530a1ab5a98ab12 | 0a4f9e31f4ea8422ea3234d5a76144d5917017a1 | refs/heads/master | 2021-01-10T05:46:29.216619 | 2015-10-20T16:20:34 | 2015-10-20T16:20:34 | 43,161,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,102 | py | import os
import requests
from wowza_ec2_bootstrapper.actions import BaseAction
class SetConfig(BaseAction):
    """Write Wowza configuration: license key, admin/publish users and conf files.

    All settings live under ``self.config.wowza``; keyword arguments passed to
    :meth:`do_action` are merged into that config as defaults first.
    """

    action_fields = dict(
        server_license={
            'required':False,
            'help':'Wowza License Key',
        },
        users={
            'required':False,
            'help':'A list of dicts containing admin user data ("name", "password" and "group")'
        },
        publish_users={
            'required':False,
            'help':'A list of dicts containing publisher users ("name", "password")'
        },
        conf_files={
            'required':False,
            'help':
                '''A list of dicts for conf files to replace containing:
                "path" : conf filename relative to Wowza root
                "content" : contents for the file (if not given, "url" must be supplied)
                "url": url to retrieve the contents for the file
                '''
        },
    )

    @property
    def conf_path(self):
        """Absolute path of Wowza's ``conf`` directory, computed once and cached."""
        p = getattr(self, '_conf_path', None)
        if p is None:
            c = self.config.wowza
            p = self._conf_path = os.path.join(c.root_path, 'conf')
        return p

    def build_filename(self, *args):
        """Join *args* onto the Wowza ``conf`` directory path."""
        return os.path.join(self.conf_path, *args)

    def do_action(self, **kwargs):
        """Apply every configured piece: license, users, publishers, conf files.

        Keyword arguments act only as defaults - existing config values win.
        """
        c = self.config.wowza
        for key in ['server_license', 'users', 'publish_users', 'conf_files']:
            if key in kwargs:
                c.setdefault(key, kwargs[key])
        if c.get('server_license'):
            self.set_server_license()
        if c.get('users'):
            self.set_users()
        if c.get('publish_users'):
            self.set_publish_users()
        if c.get('conf_files'):
            self.copy_files()

    def set_server_license(self):
        """Write the license key to ``conf/Server.license``."""
        c = self.config.wowza
        fn = self.build_filename('Server.license')
        with open(fn, 'w') as f:
            f.write(c.server_license)

    def set_users(self):
        """Write ``conf/admin.password`` as "name password group" lines.

        Missing "group" defaults to "admin".
        """
        c = self.config.wowza
        fn = self.build_filename('admin.password')
        lines = []
        keys = ['name', 'password', 'group']
        for user in c.users:
            user.setdefault('group', 'admin')
            lines.append(' '.join([user.get(key) for key in keys]))
        with open(fn, 'w') as f:
            f.write('\n'.join(lines))

    def set_publish_users(self):
        """Write ``conf/publish.password`` as "name password" lines."""
        c = self.config.wowza
        fn = self.build_filename('publish.password')
        lines = []
        keys = ['name', 'password']
        for user in c.publish_users:
            lines.append(' '.join([user.get(key) for key in keys]))
        with open(fn, 'w') as f:
            f.write('\n'.join(lines))

    def copy_files(self):
        """Write each configured conf file, downloading content when only a URL is given."""
        c = self.config.wowza
        for file_info in c.conf_files:
            content = file_info.get('content')
            if content is None:
                url = file_info['url']
                r = requests.get(url)
                content = r.content
            if not isinstance(content, bytes):
                # BUGFIX: inline 'content' from the config is text, but the
                # target file is opened in binary mode - encode it first.
                content = content.encode('utf-8')
            fn = os.path.join(c.root_path, file_info['path'])
            with open(fn, 'wb') as f:
                f.write(content)
| [
"matt@nomadic-recording.com"
] | matt@nomadic-recording.com |
766714b71a03a0bfe48dbaae5c793aea50540062 | ddf1267a1a7cb01e70e3b12ad4a7bfaf291edb3e | /src/user/migrations/0026_auto_20200117_1954.py | 0d698365c342a20c5d63847872745908c6e77769 | [
"MIT"
] | permissive | Garinmckayl/researchhub-backend | 46a17513c2c9928e51db4b2ce5a5b62df453f066 | cd135076d9a3b49a08456f7ca3bb18ff35a78b95 | refs/heads/master | 2023-06-17T04:37:23.041787 | 2021-05-18T01:26:46 | 2021-05-18T01:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # Generated by Django 2.2.8 on 2020-01-17 19:54
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the 'hub' field on the 'action' model to 'hubs'."""
    dependencies = [
        ('user', '0025_auto_20200117_1954'),
    ]
    operations = [
        migrations.RenameField(
            model_name='action',
            old_name='hub',
            new_name='hubs',
        ),
    ]
| [
"lightning.lu7@gmail.com"
] | lightning.lu7@gmail.com |
efef0ce5c060b5976beca74ab1d52ce316fc24fe | 03a22b3c00dc5188da3ed1a19077874e3ad786c5 | /futoin/cid/rmstool.py | aea53cbbfb428d40c425fdabe9c6f726303a4657 | [
"Apache-2.0"
] | permissive | iforgotband/cid-tool | fdf050169e5aa895ded9d9efb2741860ecd91a34 | f7d0e53057ecff156cf52c8dcae80c6408fb37d8 | refs/heads/master | 2021-08-15T02:51:17.717986 | 2017-11-17T07:59:24 | 2017-11-17T07:59:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,169 | py | #
# Copyright 2015-2017 (c) Andrey Galkin
#
from .subtool import SubTool
__all__ = ['RmsTool']
class RmsTool(SubTool):
    """Base class for Release Management System (artifact repository) tools.

    Concrete tools implement the ``rms*`` operations; this base supplies
    auto-detection and checksum helpers.
    """
    __slots__ = ()

    # Hash algorithms accepted in "<package>@<type>:<hex>" specifications.
    ALLOWED_HASH_TYPES = [
        'md5',
        'sha1',
        'sha256',
        'sha512',
    ]

    def autoDetect(self, config):
        """Detect via the 'rms' config entry first, then the generic detection."""
        if self._autoDetectRMS(config):
            return True
        return super(RmsTool, self).autoDetect(config)

    def rmsUpload(self, config, rms_pool, package_list):
        """Upload packages to *rms_pool* (tool-specific)."""
        raise NotImplementedError(self._name)

    def rmsPromote(self, config, src_pool, dst_pool, package_list):
        """Promote packages from *src_pool* to *dst_pool* (tool-specific)."""
        raise NotImplementedError(self._name)

    def rmsGetList(self, config, rms_pool, package_hint):
        """List packages in *rms_pool* matching *package_hint* (tool-specific)."""
        raise NotImplementedError(self._name)

    def rmsRetrieve(self, config, rms_pool, package_list):
        """Download packages from *rms_pool* (tool-specific)."""
        raise NotImplementedError(self._name)

    def rmsPoolCreate(self, config, rms_pool):
        """Create *rms_pool* (tool-specific)."""
        raise NotImplementedError(self._name)

    def rmsPoolList(self, config):
        """List available pools (tool-specific)."""
        raise NotImplementedError(self._name)

    def rmsGetHash(self, config, rms_pool, package, hash_type):
        """Return the RMS-side hash of *package* (tool-specific)."""
        raise NotImplementedError(self._name)

    def _autoDetectRMS(self, config):
        """Return True when this tool is the configured RMS."""
        if config.get('rms', None) == self._name:
            return True
        return False

    def rmsProcessChecksums(self, config, rms_pool, package_list):
        """Verify optional "<file>@<type>:<hash>" specs against the RMS.

        Returns the plain file names with any hash suffix stripped; exits via
        _errorExit on an unsupported hash type or a hash mismatch.
        """
        ret = []
        for package in package_list:
            package = package.split('@', 1)
            filename = package[0]
            if len(package) == 2:
                hash_str = package[1]
                hash_type, expected_hash = hash_str.split(':', 1)
                if hash_type not in self.ALLOWED_HASH_TYPES:
                    self._errorExit(
                        'Unsupported hash type "{0}"'.format(hash_type))
                self._info('Verifying {2} hash of {0} in {1}'.format(
                    filename, rms_pool, hash_type))
                rms_hash = self.rmsGetHash(
                    config, rms_pool, filename, hash_type)
                if rms_hash != expected_hash:
                    self._errorExit(
                        'RMS hash mismatch "{0}" != "{1}"'.format(rms_hash, expected_hash))
            ret.append(filename)
        return ret

    def rmsCalcHash(self, file_name, hash_type):
        """Return "<hash_type>:<hexdigest>" of *file_name* (64 KiB chunked read)."""
        hashlib = self._ext.hashlib
        hf = hashlib.new(hash_type)
        with open(file_name, 'rb') as f:
            # BUGFIX: the sentinel must be b'' - the file is opened in binary
            # mode, so the old '' (str) sentinel never matched and only an
            # inner "if not chunk: break" guard stopped the loop.
            for chunk in iter(lambda: f.read(65536), b''):
                hf.update(chunk)
        return "{0}:{1}".format(hash_type, hf.hexdigest())

    def rmsCalcHashes(self, file_name):
        """Return {hash_type: hexdigest} for all ALLOWED_HASH_TYPES in one pass."""
        hashlib = self._ext.hashlib
        hashes = {}
        for hash_type in self.ALLOWED_HASH_TYPES:
            hashes[hash_type] = hashlib.new(hash_type)
        with open(file_name, 'rb') as f:
            # Same b'' sentinel fix as rmsCalcHash above.
            for chunk in iter(lambda: f.read(65536), b''):
                for hash_type in self.ALLOWED_HASH_TYPES:
                    hashes[hash_type].update(chunk)
        for hash_type in self.ALLOWED_HASH_TYPES:
            hashes[hash_type] = hashes[hash_type].hexdigest()
        return hashes
| [
"andrey@futoin.org"
] | andrey@futoin.org |
6ce7bc39c05aee5ebed9f3370a45057595d99e7f | 9f99485ac5479c1e6169e71d88a33c31ff591f4e | /migrations/versions/0021.py | 4a35b8ca73df34418a507a045bfdbb03a1f849f7 | [
"MIT"
] | permissive | NewAcropolis/api | b8c65554ca78ac0e87fbef46f5f2fbecb6d7700a | 34367f55d3c9ee5bf870956ffc90fd23da559b15 | refs/heads/master | 2023-08-31T09:27:02.125549 | 2023-08-26T22:15:10 | 2023-08-26T22:15:10 | 99,582,634 | 1 | 1 | MIT | 2023-08-26T22:15:11 | 2017-08-07T13:46:23 | Python | UTF-8 | Python | false | false | 773 | py | """empty message
Revision ID: 0021 allow access_area nullable
Revises: 0020 add users
Create Date: 2019-02-02 13:42:31.511289
"""
# revision identifiers, used by Alembic.
revision = '0021 allow access_area nullable'
down_revision = '0020 add users'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Relax users.access_area to allow NULL values."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('users', 'access_area',
               existing_type=sa.VARCHAR(),
               nullable=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert: make users.access_area NOT NULL again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('users', 'access_area',
               existing_type=sa.VARCHAR(),
               nullable=False)
    # ### end Alembic commands ###
| [
"kenlt.uk@gmail.com"
] | kenlt.uk@gmail.com |
2242d8d1034270b8cfee1d019fcede2872faaa7d | 42d3d37a3dd22402154da4f4bd020afd7b7bad58 | /examples/adspygoogle/adwords/v201206/campaign_management/add_location_extension_override.py | db059a6c791048bd092aec25eff36d441d6cba7d | [
"Apache-2.0"
] | permissive | nearlyfreeapps/python-googleadwords | 1388316ec4f8d9d6074688ec4742872b34b67636 | b30d90f74248cfd5ca52967e9ee77fc4cd1b9abc | refs/heads/master | 2020-06-03T23:05:08.865535 | 2012-08-02T21:46:16 | 2012-08-02T21:46:16 | 5,278,295 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,536 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds an ad extension override to a given campaign. To get
campaigns, run get_campaigns.py.
Tags: GeoLocationService.get, AdExtensionOverrideService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
ad_id = 'INSERT_AD_GROUP_AD_ID_HERE'
ad_extension_id = 'INSERT_AD_EXTENSION_ID_HERE'
def main(client, ad_id, ad_extension_id):
  """Add a location ad extension override for one ad (Python 2 example).

  Args:
    client: an initialized AdWordsClient.
    ad_id: id of the ad group ad to override.
    ad_extension_id: id of the existing location ad extension.
  """
  # Initialize appropriate service.
  geo_location_service = client.GetGeoLocationService(
      'https://adwords-sandbox.google.com', 'v201206')
  ad_extension_override_service = client.GetAdExtensionOverrideService(
      'https://adwords-sandbox.google.com', 'v201206')
  # Construct selector and get geo location info for a given address.
  selector = {
      'addresses': [
          {
              'streetAddress': '1600 Amphitheatre Parkway',
              'cityName': 'Mountain View',
              'provinceCode': 'US-CA',
              'provinceName': 'California',
              'postalCode': '94043',
              'countryCode': 'US'
          }
      ]
  }
  # Only the first (best) geo match is used.
  geo_location = geo_location_service.Get(selector)[0]
  # Construct operations and add ad extension override.
  operations = [
      {
          'operator': 'ADD',
          'operand': {
              'adId': ad_id,
              'adExtension': {
                  'xsi_type': 'LocationExtension',
                  'id': ad_extension_id,
                  'address': geo_location['address'],
                  'geoPoint': geo_location['geoPoint'],
                  'encodedLocation': geo_location['encodedLocation'],
                  'source': 'ADWORDS_FRONTEND',
                  # Optional fields.
                  'companyName': 'ACME Inc.',
                  'phoneNumber': '(650) 253-0000'
                  # 'iconMediaId': '...',
                  # 'imageMediaId': '...'
              },
              # Optional fields.
              'overrideInfo': {
                  'LocationOverrideInfo': {
                      'radius': '5',
                      'radiusUnits': 'MILES'
                  }
              }
          }
      }
  ]
  ad_extensions = ad_extension_override_service.Mutate(operations)[0]
  # Display results.
  for ad_extension in ad_extensions['value']:
    print ('Ad extension override with id \'%s\' for ad with id \'%s\' was '
           'added.' % (ad_extension['adExtension']['id'], ad_extension['adId']))
  # Bare Python-2 print statement: emits a blank separator line.
  print
  print ('Usage: %s units, %s operations' % (client.GetUnits(),
                                             client.GetOperations()))
if __name__ == '__main__':
  # Initialize client object.
  # The path argument points at the client-library root five levels up.
  client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
  main(client, ad_id, ad_extension_id)
| [
"ahalligan@nearlyfreehosting.com"
] | ahalligan@nearlyfreehosting.com |
b62071de2eebb19932cc24e94198d7d9b2505bc4 | 6af51aa6b83175acb256524beaf7972c92b58a74 | /python/QuickDS/implementations/list.py | 005373c66ec1445062c0ff1d12e97a97ac8aaabd | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rishi772001/QuickDS | 17dc0aa52bfd65dc12f55cd33309dafdf66f52e7 | dfe29840c81a557f9dbc20dbc5088057e2d7b987 | refs/heads/master | 2023-02-21T04:34:52.354396 | 2021-01-25T13:39:58 | 2021-01-25T13:39:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | """
@Author: rishi
"""
# Import required modules
import random as rd
# Build List class
class List:
    """Helpers that build lists of random integers via random.randint."""

    @staticmethod
    def create_random_list(length=10):
        """Return a list of *length* random ints, each in [0, length].

        :param length: number of elements (also the inclusive upper bound of
            each value, matching the historical behaviour)
        :return: list of random numbers
        """
        return [rd.randint(0, length) for _ in range(length)]

    @staticmethod
    def create_random_2d_list(row_length=4, col_length=4, max_value=10):
        """Return *col_length* rows, each containing *row_length* random ints.

        Values are drawn from [0, max_value]; the default of 10 preserves the
        previously hard-coded bound, so existing callers are unaffected.

        :param row_length: number of elements per row
        :param col_length: number of rows
        :param max_value: inclusive upper bound of each value (new, optional)
        :return: 2d list of random numbers
        """
        return [[rd.randint(0, max_value) for _ in range(row_length)]
                for _ in range(col_length)]
| [
"noreply@github.com"
] | rishi772001.noreply@github.com |
a032ae93a455de494d3208476a65b775ec1ba715 | 39cd9aa81927c20d85d1b65e55523455626ee902 | /python_work/chapter_6/exercises/6_6_polling.py | e73625d621232cceca5ee36ce3f1237f6ea02530 | [] | no_license | SMS-NED16/crash-course-python | acf363562a813f7deb36614dc935be4ed2d07fee | e6e6cb787d208f51f114f71331c43af1ddc1e4c2 | refs/heads/master | 2020-03-09T02:29:35.241621 | 2018-04-21T16:09:16 | 2018-04-21T16:09:16 | 128,541,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | #dict of people and their favourite languages
# Map of poll respondents to their favourite programming language.
favourite_languages = {
    'sarah': 'c',
    'john': 'python',
    'brad': 'c++',
    'mike': 'ruby',
    'jessica': 'java',
}

# Everyone we want to hear from, whether or not they answered already.
respondents = ['sarah', 'jessica', 'andrew', 'mike', 'gilfoyle',
               'peyton', 'joaquin', 'brad']

# Invite anyone who has not answered yet; thank those who have.
for name in respondents:
    # Membership on a dict already tests its keys - no need for .keys().
    if name not in favourite_languages:
        print(name.title() + ", you should take this survey.")
    else:
        print("Thank you for taking the survey, " + name.title() + ".")
print() | [
"saadmsiddiqui96@gmail.com"
] | saadmsiddiqui96@gmail.com |
0b1e777da2a004715ed496397a5acf96ebf6b323 | 7c5ed3cbbd777d6cf3789f48e82cedbf2cec0539 | /functions/net/icmp/ping-nodes-with-thread-and-save-results.py | fde628356b9022e0bfd0b49ad5bb297d472fa470 | [] | no_license | pench3r/ShellScriptForDevOps | 8735b22b04fd2c5c4de76c5c6ebe63563c215c5c | 2df2911ebe567daeeae493a57a31f356e305089a | refs/heads/master | 2023-03-23T09:23:13.964646 | 2021-03-22T02:46:31 | 2021-03-22T02:46:31 | 350,180,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by PyCharm.
File Name: LinuxBashShellScriptForOps:ping-nodes-with-thread-and-save-results.py
Version: 0.0.1
Author: dgden
Author Email: liuhongda@didiglobal.com
Create Date: 2020/1/19
Create Time: 11:07
Description: ping nodes with multi-threading and save results into a file
Long Description:
References:
Prerequisites: pip install ping
Development Status: 3 - Alpha, 5 - Production/Stable
Environment: Console
Intended Audience: System Administrators, Developers, End Users/Desktop
License: Freeware, Freely Distributable
Natural Language: English, Chinese (Simplified)
Operating System: POSIX :: Linux, Microsoft :: Windows
Programming Language: Python :: 2.6
Programming Language: Python :: 2.7
Topic: Utilities
"""
import time
from multiprocessing.pool import ThreadPool
from threading import Lock
import ping
def is_node_alive_with_icmp_ping(ip):
    """Ping *ip* once; on failure append "<ip> <timestamp>" to the log file.

    Returns True when the single ICMP echo gets a reply within 1s, else False.
    """
    percent_lost, mrtt, artt = ping.quiet_ping(ip, timeout=1, count=1, psize=64)
    if percent_lost == 0:
        return True
    # BUGFIX: record the time of *this* failure; the module-level 'now' is
    # computed once at startup and would log a stale timestamp forever.
    failed_at = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    # BUGFIX: 'with lock' releases the lock even if the file write raises;
    # the old acquire()/release() pair could leave the lock held.
    with lock:
        with open(dbf, 'a') as fp:
            fp.write(ip + " " + failed_at + "\n" * 2)
    return False
if __name__ == '__main__':
    # Hosts to monitor; the thread pool below sizes itself to this list.
    nodes_list = [
        '192.168.88.3',
        '192.168.88.12',
        '192.168.88.4',
        '192.168.88.8',
        '192.168.88.15',
    ]
    dbf = "ping_nodes_result.txt"
    now = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    # Start a fresh result file headed by the start timestamp.
    with open(dbf, 'w') as fp_init:
        fp_init.write(now + "\n")
    # Serializes appends to the result file from the worker threads.
    lock = Lock()
    try:
        while True:
            # One worker per node, capped at 254 threads.
            processes_count = 254 if len(nodes_list) > 254 else len(nodes_list)
            pool = ThreadPool(processes=processes_count)
            pool.map(is_node_alive_with_icmp_ping, nodes_list)
            pool.close()
            pool.join()
            # Poll roughly once per second.
            time.sleep(1)
    except KeyboardInterrupt:
        print("canceled")
| [
"nageshangzei@gmail.com"
] | nageshangzei@gmail.com |
2856e10c6c27a1b2d3da8c8fbb72be1733014051 | a2211f0ef8297a77200a0b2eec8ba3476989b7e6 | /itcast/02_python核心编程/02_linux系统编程/day02_线程/demo09_轮询法-消除竞争状态.py | 1112eea51a6e9cce12ac7813f2e0563a914bd6ed | [] | no_license | qq1197977022/learnPython | f720ecffd2a70044f1644f3527f4c29692eb2233 | ba294b8fa930f784304771be451d7b5981b794f3 | refs/heads/master | 2020-03-25T09:23:12.407510 | 2018-09-16T00:41:56 | 2018-09-16T00:42:00 | 143,663,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | from threading import Thread
# Shared state is deliberately unprotected: the demo avoids the data race by
# a hand-off - fun1 writes first, then flips g_flag so fun2 may start.
g_num = 0
g_flag = True
def fun1():
    """Increment g_num 1,000,000 times, then clear g_flag to hand off to fun2."""
    global g_num
    global g_flag
    if g_flag:
        for i in range(1000000):
            g_num += 1
        g_flag = False
    print(f'线程1{g_num:+>20}\t{id(g_num)}')
def fun2():
    """Busy-wait (poll) until fun1 finishes, then do its own 1,000,000 increments."""
    global g_num
    global g_flag
    # Polling ("轮询") on g_flag.
    while True:  # If real-time reaction is not needed, sleeping between polls is better.
        if not g_flag:
            for i in range(1000000):
                g_num += 1
            print(f'线程2{g_num:*>20}\t{id(g_num)}')
            break
if __name__ == '__main__':
    print(f'主线程{g_num:->50}\t{id(g_num)}')
    p1 = Thread(target=fun1)
    p1.start()
    print(f'主线程{g_num:->60}\t\t{id(g_num)}')
    p2 = Thread(target=fun2)
    p2.start()
    print(f'主线程{g_num:->70}\t{id(g_num)}')
| [
"1197977022@qq.com"
] | 1197977022@qq.com |
b113c837d2138196777156e7cf6f982f1f752475 | 2f0d56cdcc4db54f9484b3942db88d79a4215408 | /.history/Python_Learning/lesson17_20200503135756.py | a34d2eb01fc8a23dd6720f62c11d594ccb6dc60d | [] | no_license | xiangxing98/xiangxing98.github.io | 8571c8ee8509c0bccbb6c2f3740494eedc53e418 | 23618666363ecc6d4acd1a8662ea366ddf2e6155 | refs/heads/master | 2021-11-17T19:00:16.347567 | 2021-11-14T08:35:01 | 2021-11-14T08:35:01 | 33,877,060 | 7 | 1 | null | 2017-07-01T16:42:49 | 2015-04-13T15:35:01 | HTML | UTF-8 | Python | false | false | 1,178 | py | # -*- encoding: utf-8 -*-
# !/usr/bin/env python
'''
@File : lesson17.py
@Time : 2020/04/19 21:42:24
@Author : Stone_Hou
@Version : 1.0
@Contact : xiangxing985529@163.com
@License : (C)Copyright 2010-2020, Stone_Hou
@Desc : None
'''
# here put the import lib
# 类型转换
# Type conversion (类型转换): rebinding a name changes its type freely.
a = 1
print(a, type(a))
# 1 <class 'int'>
a = 'hello'
print(a, type(a))
# hello <class 'str'>
a = True
print(a, type(a))
# True <class 'bool'>

# print('Hello'+1)
# TypeError: can only concatenate str (not "int") to str
print('Hello'+str(1))

# print('hello%d' % '123')
# TypeError: %d format: a number is required, not str
print("Change String to integer\n")
print('hello%d' % int('123'))
# hello123

# These comparisons are all True.
print(int('123') == 123)
# True
print(float('3.3') == 3.3)
# True
print(str(111) == '111')
# True
# FIX: the original line `print(if(bool(0) == False):False)` was a
# SyntaxError - `if` is a statement and cannot appear inside a call.
print(bool(0) == False)
# True

print("bool \n")
# REPL-style notes: as plain statements these evaluate and discard the value
# (nothing is printed when the file runs as a script).
bool(-123)
# True
bool(0)
# False
bool('abc')
# True
bool('False')
# True
bool('')
# False
| [
"xiangxing985529@163.com"
] | xiangxing985529@163.com |
723cafe05499d104d0a4bcd08b6a751d0732586e | 7d23afa21ca9653ea337fbb01ba54e9488de8a80 | /autosuggest/asgi.py | 043460afdf64407137e1bebcf16eb3810e094fef | [] | no_license | clarkeustaquio/NLP-WordSuggestion | 65915d5ea11f9684ecd7bea97fdfcdf71d6b9c77 | cb681f24e1e55570b8d943bf631d8a6bd2e24285 | refs/heads/main | 2023-07-08T16:19:41.713217 | 2021-08-06T03:22:04 | 2021-08-06T03:22:04 | 386,520,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for autosuggest project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autosuggest.settings')
# Module-level ASGI callable that ASGI servers import and run.
application = get_asgi_application()
| [
"clark.eustaquio@gmail.com"
] | clark.eustaquio@gmail.com |
7003ec4ae5e020487c4ff6ad970d7a07dd84868b | 3d16bcf91c546dfc638bf9e48d7690e8aed37ee2 | /tests/Cpl/Dm/Persistent/_0test/realtime/windows/mingw_w64/mytoolchain.py | 4ca8928d0d14090c111788daa7ab0f54e695bd58 | [] | no_license | johnttaylor/colony.core | 7c3aa43abdd564689e1540795b8044228b97271c | e00902d33c9224a34e9f68edb02c18eb9571b09f | refs/heads/master | 2023-07-24T08:34:04.956247 | 2023-06-20T00:02:55 | 2023-06-20T00:02:55 | 31,176,673 | 2 | 2 | null | 2023-06-17T21:56:08 | 2015-02-22T19:38:07 | C | UTF-8 | Python | false | false | 4,935 | py | #---------------------------------------------------------------------------
# This python module is used to customize a supported toolchain for your
# project specific settings.
#
# Notes:
# - ONLY edit/add statements in the sections marked by BEGIN/END EDITS
# markers.
# - Maintain indentation level and use spaces (it's a python thing)
# - rvalues must be enclosed in quotes (single ' ' or double " ")
# - The structure/class 'BuildValues' contains (at a minimum the
# following data members. Any member not specifically set defaults
# to null/empty string
# .inc
# .asminc
# .cflags
# .cppflags
# .asmflags
# .linkflags
# .linklibs
#
#---------------------------------------------------------------------------
# get definition of the Options structure
from nqbplib.base import BuildValues
from nqbplib.my_globals import NQBP_WORK_ROOT
#===================================================
# BEGIN EDITS/CUSTOMIZATIONS
#---------------------------------------------------
# Set the name for the final output item
FINAL_OUTPUT_NAME = 'a.exe'
# Link unittest directory by object module so that Catch's self-registration mechanism 'works'
unit_test_objects = '_BUILT_DIR_.src/Cpl/Dm/Persistent/_0test'
#
# For build config/variant: "Release"
#
# Set project specific 'base' (i.e always used) options
# NOTE: win32 is the only variant built with gcov coverage instrumentation
# (-fprofile-arcs/-ftest-coverage below plus -lgcov).
base_release = BuildValues()        # Do NOT comment out this line
base_release.cflags = '-m32 -std=c++11 -Wall -Werror -x c++ -fprofile-arcs -ftest-coverage -DCATCH_CONFIG_FAST_COMPILE'
base_release.linkflags = '-m32 -fprofile-arcs'
base_release.linklibs = '-lgcov'
base_release.firstobjs = unit_test_objects
# Set project specific 'optimized' options
optimzed_release = BuildValues()    # Do NOT comment out this line
optimzed_release.cflags = '-O3'
optimzed_release.linklibs = '-lstdc++'
# Set project specific 'debug' options
debug_release = BuildValues()       # Do NOT comment out this line
debug_release.linklibs = '-lstdc++'
#
# For build config/variant: "cpp11"
# (note: uses same internal toolchain options as the 'Release' variant,
# only the 'User' options will/are different)
#
# Construct option structs
base_cpp11 = BuildValues()
optimzed_cpp11 = BuildValues()
debug_cpp11 = BuildValues()
# Set 'base' options
base_cpp11.cflags = '-m64 -std=c++11 -Wall -Werror -x c++ -DCATCH_CONFIG_FAST_COMPILE'
base_cpp11.linkflags = '-m64'
base_cpp11.firstobjs = unit_test_objects
# Set 'Optimized' options
optimzed_cpp11.cflags = '-O3'
optimzed_cpp11.linklibs = '-lstdc++'
# Set 'debug' options
debug_cpp11.linklibs = '-lstdc++'
#
# For build config/variant: "win64"
# (note: uses same internal toolchain options as the 'Release' variant,
# only the 'User' options will/are different)
#
# Construct option structs
base_win64 = BuildValues()
optimzed_win64 = BuildValues()
debug_win64 = BuildValues()
# Set 'base' options
base_win64.cflags = '-m64 -std=c++11 -Wall -Werror -x c++ -DCATCH_CONFIG_FAST_COMPILE'
base_win64.linkflags = '-m64'
base_win64.firstobjs = unit_test_objects
# Set 'Optimized' options
optimzed_win64.cflags = '-O3'
optimzed_win64.linklibs = '-lstdc++'
# Set 'debug' options
debug_win64.linklibs = '-lstdc++'
#-------------------------------------------------
# ONLY edit this section if you are ADDING options
# for build configurations/variants OTHER than the
# 'release' build
#-------------------------------------------------
# Each *_opts dict bundles the base/optimized/debug overrides for one variant.
release_opts = { 'user_base':base_release,
                 'user_optimized':optimzed_release,
                 'user_debug':debug_release
               }
# Add new dictionary of for new build configuration options
cpp11_opts = { 'user_base':base_cpp11,
               'user_optimized':optimzed_cpp11,
               'user_debug':debug_cpp11
             }
# Add new dictionary of for new build configuration options
win64_opts = { 'user_base':base_win64,
               'user_optimized':optimzed_win64,
               'user_debug':debug_win64
             }
# Add new variant option dictionary to # dictionary of
# build variants
build_variants = { 'win32':release_opts,
                   'win64':win64_opts,
                   'cpp11':cpp11_opts,
                 }
#---------------------------------------------------
# END EDITS/CUSTOMIZATIONS
#===================================================
# Capture project/build directory
import os
prjdir = os.path.dirname(os.path.abspath(__file__))
# Select Module that contains the desired toolchain
from nqbplib.toolchains.windows.mingw_w64.console_exe import ToolChain
# Function that instantiates an instance of the toolchain
def create():
    """Instantiate the project's toolchain; 'win32' is the default variant."""
    return ToolChain(FINAL_OUTPUT_NAME, prjdir, build_variants, "win32")
| [
"john.t.taylor@gmail.com"
] | john.t.taylor@gmail.com |
79c0640dfe81c2911cd35bee527ab1c36f02d83e | 89e6c3548fbdd06178aae712de1ff19004bc2faa | /django_hg/admin.py | 6fc1e1082734fd909d4fff6278782f225444eeac | [
"BSD-2-Clause"
] | permissive | bhgv/ublog_git.hg.repo-django.python-engine | a3f3cdcbacc95ec98f022f9719d3b300dd6541d4 | 74cdae100bff5e8ab8fb9c3e8ba95623333c2d43 | refs/heads/master | 2020-03-23T01:04:07.431749 | 2018-07-25T12:59:21 | 2018-07-25T12:59:21 | 140,899,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from my_django.contrib import admin
from django_hg.models import HgRepository, RepositoryUser
class UserInline(admin.TabularInline):
    # Show per-repository users as an inline table on the repository page.
    model=RepositoryUser
class HgRepositoryAdmin(admin.ModelAdmin):
    # Repositories are edited together with their users.
    inlines=[UserInline,]
# Make HgRepository manageable in the Django admin with the options above.
admin.site.register(HgRepository, HgRepositoryAdmin)
| [
"bhgv.empire@gmail.com"
] | bhgv.empire@gmail.com |
1d41c54ec8ce0af470639cebf2a279f1299f0c15 | 50cbc789f765610b1074b414a4cb5fbecb65b340 | /djangosige/apps/estoque/models/movimento.py | b23514c840fb941be3044a344591b3a4a1a54fa4 | [
"MIT"
] | permissive | jonatasoli/djangoSIGE | 6f79a92737e281ab6e999ad1353c5f24a0e54d97 | 31bba22cf5ce304bc96068e93d49002f99066218 | refs/heads/master | 2021-10-22T20:43:37.591470 | 2019-03-12T21:20:36 | 2019-03-12T21:20:36 | 109,386,717 | 1 | 0 | null | 2017-11-03T11:12:21 | 2017-11-03T11:12:20 | null | UTF-8 | Python | false | false | 4,915 | py | # -*- coding: utf-8 -*-
from django.db import models
from django.core.validators import MinValueValidator
from decimal import Decimal
from django.core.urlresolvers import reverse_lazy
from django.template.defaultfilters import date
from . import DEFAULT_LOCAL_ID
import locale
locale.setlocale(locale.LC_ALL, '')
# Reasons for stock-entry movements (code, human label):
# 0 = adjustment, 1 = purchase order, 2 = supplier invoice import,
# 3 = initial adjustment.
TIPOS_MOVIMENTO_ENTRADA = (
    (u'0', u'Ajuste'),
    (u'1', u'Entrada por pedido de compra'),
    (u'2', u'Entrada por importação de nota fiscal de fornecedor'),
    (u'3', u'Ajuste inicial'),
)
# Reasons for stock-exit movements:
# 0 = adjustment, 1 = sales order, 2 = invoice import.
TIPOS_MOVIMENTO_SAIDA = (
    (u'0', u'Ajuste'),
    (u'1', u'Saída por pedido de venda'),
    (u'2', u'Saída por importação de nota fiscal'),
)
class ItensMovimento(models.Model):
    """One product line (quantity/unit value/subtotal) inside a stock movement."""
    produto = models.ForeignKey('cadastro.Produto', related_name="moviment_estoque_produto",
                                on_delete=models.CASCADE, null=True, blank=True)
    movimento_id = models.ForeignKey(
        'estoque.MovimentoEstoque', related_name="itens_movimento", on_delete=models.CASCADE)
    quantidade = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                     MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    valor_unit = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                     MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    subtotal = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                   MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    def get_estoque_atual_produto(self):
        """Return the product's current stock, 'Não controlado' when stock is
        not tracked, or (implicitly) None when no product is set."""
        if self.produto:
            if self.produto.controlar_estoque and self.produto.estoque_atual:
                return self.produto.estoque_atual
            else:
                return 'Não controlado'
    def format_estoque_atual_produto(self):
        """Locale-format the stock amount; pass non-Decimal results through.

        NOTE(review): locale.format() is deprecated/removed in newer Python;
        locale.format_string() is the replacement - confirm target runtime."""
        estoque_atual = self.get_estoque_atual_produto()
        if isinstance(estoque_atual, Decimal):
            return locale.format(u'%.2f', estoque_atual, 1)
        else:
            return estoque_atual
class MovimentoEstoque(models.Model):
    """Base stock movement: date, item count, total value, free-form notes.

    Subclassed by EntradaEstoque / SaidaEstoque / TransferenciaEstoque.
    """
    data_movimento = models.DateField(null=True, blank=True)
    quantidade_itens = models.IntegerField(
        validators=[MinValueValidator(0)], default=0)
    valor_total = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                      MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    observacoes = models.CharField(max_length=1055, null=True, blank=True)
    class Meta:
        verbose_name = "Movimento de Estoque"
        permissions = (
            ("view_movimentoestoque", "Can view movimento estoque"),
            ("consultar_estoque", "Pode consultar estoque"),
        )
    @property
    def format_data_movimento(self):
        """Movement date rendered as dd/mm/YYYY."""
        return '%s' % date(self.data_movimento, "d/m/Y")
    def format_quantidade_itens(self):
        # NOTE(review): locale.format() is deprecated in newer Python;
        # locale.format_string() is the replacement - confirm target runtime.
        return locale.format(u'%.2f', self.quantidade_itens, 1)
    def format_valor_total(self):
        """Total value formatted with the active locale (2 decimals, grouping)."""
        return locale.format(u'%.2f', self.valor_total, 1)
class EntradaEstoque(MovimentoEstoque):
    """Stock entry (goods in), optionally linked to a purchase order/supplier."""
    # Entry reason code; see TIPOS_MOVIMENTO_ENTRADA.
    tipo_movimento = models.CharField(
        max_length=1, choices=TIPOS_MOVIMENTO_ENTRADA, default='0')
    pedido_compra = models.ForeignKey(
        'compras.PedidoCompra', related_name="entrada_estoque_pedido", on_delete=models.SET_NULL, null=True, blank=True)
    fornecedor = models.ForeignKey(
        'cadastro.Fornecedor', related_name="entrada_estoque_fornecedor", on_delete=models.SET_NULL, null=True, blank=True)
    # Destination stock location.
    local_dest = models.ForeignKey(
        'estoque.LocalEstoque', related_name="entrada_estoque_local", default=DEFAULT_LOCAL_ID)
    def get_edit_url(self):
        """URL of this entry's detail view."""
        return reverse_lazy('estoque:detalharentradaestoqueview', kwargs={'pk': self.id})
    def get_tipo(self):
        """Human-readable movement kind ("Entrada" = entry)."""
        return 'Entrada'
class SaidaEstoque(MovimentoEstoque):
tipo_movimento = models.CharField(
max_length=1, choices=TIPOS_MOVIMENTO_SAIDA, default='0')
pedido_venda = models.ForeignKey(
'vendas.PedidoVenda', related_name="saida_estoque", on_delete=models.SET_NULL, null=True, blank=True)
local_orig = models.ForeignKey(
'estoque.LocalEstoque', related_name="saida_estoque_local", default=DEFAULT_LOCAL_ID)
def get_edit_url(self):
return reverse_lazy('estoque:detalharsaidaestoqueview', kwargs={'pk': self.id})
def get_tipo(self):
return 'Saída'
class TransferenciaEstoque(MovimentoEstoque):
local_estoque_orig = models.ForeignKey(
'estoque.LocalEstoque', related_name="transf_estoque_orig", on_delete=models.CASCADE)
local_estoque_dest = models.ForeignKey(
'estoque.LocalEstoque', related_name="transf_estoque_dest", on_delete=models.CASCADE)
def get_edit_url(self):
return reverse_lazy('estoque:detalhartransferenciaestoqueview', kwargs={'pk': self.id})
def get_tipo(self):
return 'Transferência'
| [
"thiagovilelap@hotmail.com"
] | thiagovilelap@hotmail.com |
9472d2e6c1f8c59f279a64790c1afe940a3f77db | 7c74ceb9f8addcc0816d012e0b84b174b96e0def | /src/azure-cli-core/azure/cli/core/decorators.py | 55a1d88e11e60f053ba780fa7627e97a9dc7db78 | [
"MIT",
"LGPL-2.1-only",
"LGPL-2.1-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | microsoft/azure-cli | 4c826290e7a6f6bd27da3829b05e4f02ff6dc8d9 | 9ba64b33f6f78e2c3e42f8a147f59484300e8779 | refs/heads/dev | 2023-08-31T08:51:39.526556 | 2022-11-28T19:08:23 | 2022-11-28T19:08:23 | 370,900,439 | 7 | 7 | MIT | 2023-08-01T23:34:50 | 2021-05-26T03:59:41 | Python | UTF-8 | Python | false | false | 2,777 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Utility decorators
This module will be executed in separate process after az process is terminated to upload traces, so it is preferable
that it doesn't import modules other than those in the Python Standard Library
"""
import hashlib
from functools import wraps
from knack.log import get_logger
# pylint: disable=too-few-public-methods
class Completer:
def __init__(self, func):
self.func = func
def __call__(self, **kwargs):
namespace = kwargs['parsed_args']
prefix = kwargs['prefix']
cmd = namespace._cmd # pylint: disable=protected-access
return self.func(cmd, prefix, namespace)
def call_once(factory_func):
""""
When a function is annotated by this decorator, it will be only executed once. The result will be cached and
returned for following invocations.
"""
factory_func.executed = False
factory_func.cached_result = None
def _wrapped(*args, **kwargs):
if not factory_func.executed:
factory_func.cached_result = factory_func(*args, **kwargs)
return factory_func.cached_result
return _wrapped
def hash256_result(func):
"""
Secure the return string of the annotated function with SHA256 algorithm. If the annotated function doesn't return
string or return None, raise ValueError.
"""
@wraps(func)
def _decorator(*args, **kwargs):
val = func(*args, **kwargs)
if val is None:
raise ValueError('Return value is None')
if not isinstance(val, str):
raise ValueError('Return value is not string')
if not val:
return val
hash_object = hashlib.sha256(val.encode('utf-8'))
return str(hash_object.hexdigest())
return _decorator
def suppress_all_exceptions(fallback_return=None, **kwargs): # pylint: disable=unused-argument
# The kwargs is a fallback to ensure extensions (eg. alias) are not broken
def _decorator(func):
@wraps(func)
def _wrapped_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception: # nopa pylint: disable=broad-except
import traceback
get_logger(__name__).info('Suppress exception:\n%s', traceback.format_exc())
if fallback_return is not None:
return fallback_return
return _wrapped_func
return _decorator
| [
"noreply@github.com"
] | microsoft.noreply@github.com |
385a49c13b863a8896488f4156d05502aa2f7c80 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/appcontainers/azure-mgmt-appcontainers/generated_samples/connected_environments_dapr_components_list_secrets.py | ea1924799cfe381f8684b2226852edd26e271f44 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,696 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.appcontainers import ContainerAppsAPIClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-appcontainers
# USAGE
python connected_environments_dapr_components_list_secrets.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ContainerAppsAPIClient(
credential=DefaultAzureCredential(),
subscription_id="8efdecc5-919e-44eb-b179-915dca89ebf9",
)
response = client.connected_environments_dapr_components.list_secrets(
resource_group_name="examplerg",
connected_environment_name="myenvironment",
component_name="reddog",
)
print(response)
# x-ms-original-file: specification/app/resource-manager/Microsoft.App/stable/2023-05-01/examples/ConnectedEnvironmentsDaprComponents_ListSecrets.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
9026865925b672227cd58a155c84cd23b74dd83d | a06cd01b474e74d7b57144fca3930b7a2fe01ec4 | /ethpm/validation/uri.py | 009ceaf58091465a85e442722fdaa395a10bf1f1 | [
"MIT"
] | permissive | XTAUEMC/web3.py | e95b9b2e8bb18cae9794a0eaecb2bef3cbd87ec6 | 36224cfd19c3cf50746ccdeae8521ce4c08b7e3a | refs/heads/master | 2020-06-20T00:01:35.345192 | 2019-07-14T19:56:28 | 2019-07-14T19:56:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,339 | py | import hashlib
from typing import (
List,
)
from urllib import (
parse,
)
from eth_utils import (
is_checksum_address,
to_bytes,
to_int,
to_text,
)
from ethpm._utils.chains import (
is_supported_chain_id,
)
from ethpm._utils.ipfs import (
is_ipfs_uri,
)
from ethpm._utils.registry import (
is_ens_domain,
)
from ethpm.constants import (
REGISTRY_URI_SCHEME,
)
from ethpm.exceptions import (
ValidationError,
)
from ethpm.validation.package import (
validate_package_name,
)
from web3 import Web3
def validate_ipfs_uri(uri: str) -> None:
"""
Raise an exception if the provided URI is not a valid IPFS URI.
"""
if not is_ipfs_uri(uri):
raise ValidationError(f"URI: {uri} is not a valid IPFS URI.")
def validate_registry_uri(uri: str) -> None:
"""
Raise an exception if the URI does not conform to the registry URI scheme.
"""
parsed = parse.urlparse(uri)
scheme, authority, pkg_name, query = (
parsed.scheme,
parsed.netloc,
parsed.path,
parsed.query,
)
validate_registry_uri_scheme(scheme)
validate_registry_uri_authority(authority)
if query:
validate_registry_uri_version(query)
validate_package_name(pkg_name[1:])
def validate_registry_uri_authority(auth: str) -> None:
"""
Raise an exception if the authority is not a valid ENS domain
or a valid checksummed contract address.
"""
try:
address, chain_id = auth.split(':')
except ValueError:
raise ValidationError(
f"{auth} is not a valid registry URI authority. "
"Please try again with a valid registry URI."
)
if is_ens_domain(address) is False and not is_checksum_address(address):
raise ValidationError(
f"{auth} is not a valid registry address. "
"Please try again with a valid registry URI."
)
if not is_supported_chain_id(to_int(text=chain_id)):
raise ValidationError(
f"Chain ID: {chain_id} is not supported. Supported chain ids include: "
"1 (mainnet), 3 (ropsten), 4 (rinkeby), 5 (goerli) and 42 (kovan). "
"Please try again with a valid registry URI."
)
def validate_registry_uri_scheme(scheme: str) -> None:
"""
Raise an exception if the scheme is not the valid registry URI scheme ('ercXXX').
"""
if scheme != REGISTRY_URI_SCHEME:
raise ValidationError(f"{scheme} is not a valid registry URI scheme.")
def validate_registry_uri_version(query: str) -> None:
"""
Raise an exception if the version param is malformed.
"""
query_dict = parse.parse_qs(query, keep_blank_values=True)
if "version" not in query_dict:
raise ValidationError(f"{query} is not a correctly formatted version param.")
def validate_single_matching_uri(all_blockchain_uris: List[str], w3: Web3) -> str:
"""
Return a single block URI after validating that it is the *only* URI in
all_blockchain_uris that matches the w3 instance.
"""
from ethpm.uri import check_if_chain_matches_chain_uri
matching_uris = [
uri for uri in all_blockchain_uris if check_if_chain_matches_chain_uri(w3, uri)
]
if not matching_uris:
raise ValidationError("Package has no matching URIs on chain.")
elif len(matching_uris) != 1:
raise ValidationError(
f"Package has too many ({len(matching_uris)}) matching URIs: {matching_uris}."
)
return matching_uris[0]
def validate_blob_uri_contents(contents: bytes, blob_uri: str) -> None:
"""
Raises an exception if the sha1 hash of the contents does not match the hash found in te
blob_uri. Formula for how git calculates the hash found here:
http://alblue.bandlem.com/2011/08/git-tip-of-week-objects.html
"""
blob_path = parse.urlparse(blob_uri).path
blob_hash = blob_path.split("/")[-1]
contents_str = to_text(contents)
content_length = len(contents_str)
hashable_contents = "blob " + str(content_length) + "\0" + contents_str
hash_object = hashlib.sha1(to_bytes(text=hashable_contents))
if hash_object.hexdigest() != blob_hash:
raise ValidationError(
f"Hash of contents fetched from {blob_uri} do not match its hash: {blob_hash}."
)
| [
"nickgheorghita@gmail.com"
] | nickgheorghita@gmail.com |
151ef292dd5b4742d46553aee188acfd3a2cd8b2 | 1ff41a2393c969aaf662a198f405b4e76d4ce957 | /myshop/myshop/settings.py | f85c2d92c211522f0c679ef4a12a154d55a90ecd | [] | no_license | asimonia/django-bookstore | 7a15495c8a522a4c8f5151948d1a0f2a67a4f63a | 29fd151eb9269c3e43e1b2736f11f49247ec2994 | refs/heads/master | 2021-01-17T17:27:53.268028 | 2016-07-19T02:15:52 | 2016-07-19T02:15:52 | 63,547,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,086 | py | """
Django settings for myshop project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-kt3)=5e1730c!(18nq@zuo%e*%7yccy66f5*8_yyt+a&%)mod'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shop',
'cart',
'orders',
'paypal.standard.ipn',
'payment',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'myshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'cart.context_processors.cart',
],
},
},
]
WSGI_APPLICATION = 'myshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# Add key to store the cart in the user session
CART_SESSION_ID = 'cart'
# Send emails to the backend. Set up email later
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
PAYPAL_RECEIVER_EMAIL = 'alex@myshop.com'
PAYPAL_TEST = True | [
"alex.simonian@gmail.com"
] | alex.simonian@gmail.com |
c11fdcc792918562865cbdc580eb0b8eb3b9459a | 67df0894b961a8d1729214f07c954c551fd06f3f | /re_search_en/o_O_re_search_en.py | fbcb34853079f23d19e01ac81ea62b4f37047ea5 | [] | no_license | hihumi/enjapy | 9261b209e3dbe3628229acff0e425da08ab46caf | 4d7344cb7291abe4deb4bca42590693d89ba328e | refs/heads/master | 2020-05-23T10:11:01.260143 | 2017-09-30T06:10:22 | 2017-09-30T06:10:22 | 80,397,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,838 | py | #!/usr/bin/env python3
"""main.pyで入力されたwordが、
oxxx_reモジュールのoxxx_re_func()で作成した正規表現のo、またはOからはじまる英単語と合致した場合、
oxxx_jaモジュールのoxxx_ja_func()を呼ぶ
ただし、o-listまたはO-list(すべて大文字小文字は問わない)と入力された場合、o_O_listモジュールのo_O_list_func()を呼ぶ
"""
# o: a b c d e f g h i j k l m n o p q r s t u v w x y z
#
# from re_en.oxxx_re import oxxx_re_func
# office:
from re_en.office_re import office_re_func
# order:
from re_en.order_re import order_re_func
# outlook:
from re_en.outlook_re import outlook_re_func
# 0-list:
from re_en.o_O_list_re import o_O_list_re_func
# o: a b c d e f g h i j k l m n o p q r s t u v w x y z
#
# from print_ja.oxxx_ja import oxxx_ja_func
# office:
from print_ja.office_ja import office_ja_func
# order:
from print_ja.order_ja import order_ja_func
# outlook:
from print_ja.outlook_ja import outlook_ja_func
from print_en_lists.o_O_list import o_O_list_func
def o_O_re_search_en_func(o_O_word):
"""main.pyで入力されたwordが、
oxxx_reモジュールのoxxx_re_func()で作成した正規表現のo、またはOからはじまる英単語と合致した場合、
oxxx_jaモジュールのoxxx_ja_func()を呼ぶ関数
ただし、最後のelifは、o_O_listモジュールのo_O_list_func()を呼ぶ
"""
# o: a b c d e f g h i j k l m n o p q r s t u v w x y z
if office_re_func().search(o_O_word): # office
office_ja_func()
elif order_re_func().search(o_O_word): # order
order_ja_func()
elif outlook_re_func().search(o_O_word): # outlook
outlook_ja_func()
elif o_O_list_re_func().search(o_O_word): # o-list
o_O_list_func()
else:
print('not found...')
| [
"yui.maa5800@gmail.com"
] | yui.maa5800@gmail.com |
8cb50e722107fc700d8f17e3ee0ff96f2942120c | 908554f8250780024ffdd6c6f32a65acc36ec5cd | /backend/task_category/migrations/0001_initial.py | 63a0837d759cfbb72a35d63a7f45744f3ea04b43 | [] | no_license | crowdbotics-apps/petsker-23110 | 799032a970fce4da74d9ced06655b073ecee1621 | ca4d40e9b4b72d36fc60c8544c4e27635864a203 | refs/heads/master | 2023-01-21T21:39:29.108783 | 2020-12-01T15:54:38 | 2020-12-01T15:54:38 | 317,582,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | # Generated by Django 2.2.17 on 2020-12-01 15:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Category",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
("icon", models.URLField()),
("description", models.TextField(blank=True, null=True)),
("is_recurring", models.BooleanField(blank=True, null=True)),
],
),
migrations.CreateModel(
name="Subcategory",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
("description", models.TextField(blank=True, null=True)),
(
"category",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="subcategory_category",
to="task_category.Category",
),
),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
d97eaea3fb21768d3835c717678dd583e27be0e8 | df789505c99974c0ba45adc57e52fc7865ff2a28 | /python练习题/常用案例.py | f75ecfae83f1b4f2f8b671fcb764e2d1ca341108 | [] | no_license | zhiwenwei/python | 6fc231e47a9fbb555efa287ac121546e07b70f06 | 76d267e68f762ee9d7706e1800f160929544a0a3 | refs/heads/master | 2021-01-20T04:21:44.825752 | 2018-12-19T06:20:10 | 2018-12-19T06:20:10 | 89,676,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #coding:utf-8
#Author:Mr Zhi
#排列组合,将4个数字可能组成的所有互不相同且无重复数字的排列组合列出。
for i in range(1,5):
for j in range(1,5):
for k in range(1,5):
if (i != k) and (i != j) and (j != k ):
print(i,j,k)
import fileinput
| [
"ddzhiwenwei@163.com"
] | ddzhiwenwei@163.com |
01743d7f3fd3ed87763ed1ea40e97069ac07d8c2 | 4be2c72579486ad04a00db0349028de96d2dce89 | /scripts/aTools/animTools/animBar/animBarUI.py | c47fbf7c062fa74f255ab5c363e08a4142f0e8b9 | [] | no_license | italic-r/maya-prefs | 6a617d40beee8937186b4699c5cead44e01c2d40 | aa21e5e2938dc2698ce5f555ee74a594e08aed2b | refs/heads/master | 2021-09-09T16:31:00.411349 | 2018-03-18T01:40:10 | 2018-03-18T01:40:10 | 86,961,959 | 16 | 8 | null | null | null | null | UTF-8 | Python | false | false | 6,651 | py | '''
========================================================================================================================
Author: Alan Camilo
www.alancamilo.com
Requirements: aTools Package
------------------------------------------------------------------------------------------------------------------------
To install aTools, please follow the instructions in the file how_to_install.txt, located in the folder aTools
------------------------------------------------------------------------------------------------------------------------
To unistall aTools, go to menu (the last button on the right), Uninstall
========================================================================================================================
'''
# maya modulesspecialTools
from maya import cmds
from aTools.generalTools.aToolsGlobals import aToolsGlobals as G
from aTools.generalTools import aToolsClasses; reload(aToolsClasses)
from aTools.commonMods import animMod; reload(animMod)
from aTools.generalTools import generalToolsUI; reload(generalToolsUI)
from aTools.commonMods import utilMod; reload(utilMod)
from aTools.commonMods import commandsMod; reload(commandsMod)
from aTools.commonMods import aToolsMod; reload(aToolsMod)
from aTools import setup; reload(setup)
# constants
SUB_UI_MODS = ["tweenMachine", "keyTransform", "tangents", "specialTools", "tUtilities"]
# import subUI modules
for loopMod in SUB_UI_MODS:
exec("import aTools.animTools.animBar.subUIs.%s as %s; reload(%s)"%(loopMod, loopMod, loopMod))
def show(mode="show"):
G.aToolsBar = G.aToolsBar or AnimationBar_Gui()
if mode == False: mode = "show"
if mode == True: mode = "toggle"
if mode == "launch":
lastState = aToolsMod.loadInfoWithUser("userPrefs", "animationBarLastState")
if lastState: show()
return
if mode == "show" or mode == "hide":
if cmds.toolBar("aTools_Animation_Bar", query=True, exists=True):
visible = (mode == "show")
cmds.toolBar("aTools_Animation_Bar", edit=True, visible=visible)
G.aToolsBar.saveLastState(visible)
return
elif mode == "show":
G.aToolsBar.start()
G.aToolsBar.saveLastState()
return
if mode == "toggle":
if cmds.toolBar("aTools_Animation_Bar", query=True, exists=True):
state = cmds.toolBar("aTools_Animation_Bar", query=True, visible=True)
visible = (not state)
G.aToolsBar.toggleToolbars(visible)
cmds.toolBar("aTools_Animation_Bar", edit=True, visible=visible)
G.aToolsBar.saveLastState(visible)
return
else:
show()
return
if mode == "refresh":
G.aToolsBar = AnimationBar_Gui()
G.aToolsBar.start()
G.aToolsBar.saveLastState()
class AnimationBar_Gui(object):
def __init__(self):
self.winName = "aAnimationBarWin"
self.toolbarName = "aTools_Animation_Bar"
self.allWin = [self.winName, self.toolbarName]
self.buttonSize = {"small":[15, 20], "big":[25, 25]}
self.barOffset = 0
self.barHotkeys = {}
G.aToolsUIs = {"toolbars":[
],
"windows":[
]}
# [ SUBUIs ]
self.uiList = None
self.subUIs = None
def __getattr__(self, attr):
return None
def start(self):
from aTools.generalTools import aToolsClasses; reload(aToolsClasses)
self.startUpFunctions()
self.delWindows()
self.createWin()
def startUpFunctions(self):
#wait cursor state
n = 0
while True:
if not cmds.waitCursor(query=True, state=True) or n > 100: break
cmds.waitCursor(state=False)
n += 1
#refresh state
cmds.refresh(suspend=False)
#undo state
if not cmds.undoInfo(query=True, stateWithoutFlush=True): cmds.undoInfo(stateWithoutFlush=True)
#progress bar state
utilMod.setProgressBar(status=None, progress=None, endProgress=True)
def saveLastState(self, state=True):
aToolsMod.saveInfoWithUser("userPrefs", "animationBarLastState", state)
def createWin(self):
# Creates window
self.mainWin = cmds.window(self.winName, sizeable=True)
# Main frame
cmds.frameLayout("mainFrameLayout", labelVisible=False, borderVisible=False, w=10, marginHeight=0, marginWidth=0, labelIndent=0, collapsable=False)
cmds.rowLayout(numberOfColumns=2, adjustableColumn=1, columnAttach=([2, 'right', self.barOffset]), h=37)
cmds.text(label="")
self.subUIsLayout = cmds.rowLayout("mainLayout", numberOfColumns=len(SUB_UI_MODS)+2)
# subUIs
self.uiList = [eval("%s.%s%s_Gui"%(loopUi, loopUi[0].upper(), loopUi[1:])) for loopUi in SUB_UI_MODS]
# append general tools ui
self.uiList.append(generalToolsUI.GeneralTools_Gui)
# define subUis
self.subUIs = [loopUi(self.subUIsLayout, self.buttonSize) for loopUi in self.uiList]
self.addSubUIs()
# shows toolbar
cmds.toolBar(self.toolbarName, area='bottom', content=self.mainWin, allowedArea=['bottom'])
# end method createWin
#---------------------------------------------------------------------
def addSubUIs(self):
# parent subUis to the main layout
for loopIndex, loopSubUI in enumerate(self.subUIs):
loopSubUI.createLayout()
# space
if loopIndex < len(self.subUIs) -1:
cmds.rowLayout(numberOfColumns=2)
cmds.text( label=' ', h=1 )
# end for
def toggleToolbars(self, visible):
pass
def delWindows(self, onOff=True, forceOff=False):
for loopWin in self.allWin:
if cmds.window(loopWin, query=True, exists=True): cmds.deleteUI(loopWin)
if cmds.toolBar(loopWin, query=True, exists=True):
cmds.deleteUI(loopWin)
| [
"italic.rendezvous@gmail.com"
] | italic.rendezvous@gmail.com |
18a1c1067bad2013e8ca5d5d30bccc4e31eb262f | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/Cases/2468/.mooctest/answer.py | 97b5f1afb2362e43de2e28c40adb9a4f8b3ccb58 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | t=int(input())
while(t>0):
n=int(input())
a=[]
num=input()
for x in num.split():
a.append(int(x))
P=[]
i=0
p=1
for x in a:
p=p*x
while i<n:
P.insert(i,p//a[i])
i=i+1
for x in P:
print(x,end=" ")
print()
t=t-1
| [
"382335657@qq.com"
] | 382335657@qq.com |
421123dd75c5d9c60f99ea7d66de6b553bae9db3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/148/usersdata/272/86767/submittedfiles/testes.py | db6500d7b4c0772c218a8e23b5ab043f7012a066 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
def impar(x):
if ((x%2)!=0):
return (True)
else:
return (False)
a=int(input('Digite a: '))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8c1ec39b364c12047ff70031d242a8c292be9e94 | 81b0e6fe7a6e56ed8a91748499b81ddd3f2e45f8 | /neural_network.py | 56ddb8ba46c9ef0d3b02ce7f51677509286dce26 | [] | no_license | shieldforever/DeepLearning | cfef817602b9677df4df4c1b87e60c5e91f2315a | b8080938a4b22395379be9032266df36cb5491e6 | refs/heads/master | 2021-01-05T14:12:26.110888 | 2019-10-29T11:23:06 | 2019-10-29T11:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,885 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 14 17:55:42 2019
@author: Administrator
"""
import numpy as np
from scipy import special
class neuralNetwork(object):
#initialize the neural network
def __init__(self,inputnodes,hiddennodes,outputnodes,learningrate):
self.inodes=inputnodes
self.hnodes=hiddennodes
self.onodes=outputnodes
self.lr=learningrate
#self.wih=np.random.rand(self.hnodes,self.inodes)-0.5
#self.who=np.random.rand(self.onodes,self.hnodes)-0.5
#means,Variance,shape of array
self.wih = np.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
self.who = np.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))
#sigmoid function
self.activation_function=lambda x:special.expit(x)
pass
#train the neural network
def train(self,inputs_list,targets_list):
#convert inputs list and targets list to 2d array
inputs=np.array(inputs_list,ndmin=2).T
targets=np.array(targets_list,ndmin=2).T
hidden_inputs = np.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# calculate signals into final output layer
final_inputs = np.dot(self.who, hidden_outputs)
# calculate the signals emerging from final output layer
final_outputs = self.activation_function(final_inputs)
output_errors=targets-final_outputs
hidden_errors=np.dot(self.who.T,output_errors)
self.who +=self.lr*np.dot((output_errors*final_outputs*(1.0-final_outputs)),
np.transpose(hidden_outputs))
self.wih +=self.lr*np.dot((hidden_errors*hidden_outputs*(1.0-hidden_outputs)),
np.transpose(inputs))
pass
#query the neural network
def query(self,inputs_list):
# convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
# calculate signals into hidden layer
hidden_inputs = np.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# calculate signals into final output layer
final_inputs = np.dot(self.who, hidden_outputs)
# calculate the signals emerging from final output layer
final_outputs = self.activation_function(final_inputs)
return final_outputs
input_nodes=3
hidden_nodes=3
output_nodes=3
learning_rate=0.3
n=neuralNetwork(input_nodes,hidden_nodes,output_nodes,learning_rate)
n.train([1,0.5,-0.5],[0.8,0.6,0.6])
print(n.query([1,0.5,-0.5]))
'''
a1=np.random.rand(3,3) #0-1的随机值
a2=a1-0.5 #-0.5-0.5的随机值
'''
| [
"870407139@qq.com"
] | 870407139@qq.com |
c0a0e1a4bdaa1c76b49f676e19384408c030d1c1 | e1f519fc0c4f76d11db9584f74c5b49ca95b0798 | /cs_notes/arrays/RLE_iterator.py | 8a78d35ba581f17f8aca1d70416549bdd8f05e1b | [] | no_license | hwc1824/LeetCodeSolution | 22d41327cde2b9562f58cc73e6205c7c2f9a5e1c | ac53dd9bf2c4c9d17c9dc5f7fdda32e386658fdd | refs/heads/master | 2023-08-16T10:15:39.351933 | 2018-12-19T00:43:07 | 2018-12-19T00:43:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | # 900. RLE Iterator
# https://leetcode.com/problems/rle-iterator/description/
class RLEIterator:
# 下面的解法的想法是,只要記錄目前走到第幾步就好了
# 儲存 A 的時候將連續出現的次數變換成最後一次出現的位置
# 當 next 被呼叫的時候,將 n 累計起來
# 然後就只要查閱 n 介於那兩個數字的最後一次出現位置之間
def __init__(self, A):
"""
:type A: List[int]
"""
self.curr = 0
self.idx = 0
self.record = []
c_sum = 0
for i in range(0, len(A), 2):
c_sum += A[i]
if A[i] != 0: self.record.append((c_sum, A[i+1]))
def next(self, n):
"""
:type n: int
:rtype: int
"""
if self.idx >= len(self.record): return -1
self.curr+=n
for count, val in self.record[self.idx:]:
if self.curr <= count: return val
self.idx+=1
return -1
# Your RLEIterator object will be instantiated and called as such:
# obj = RLEIterator(A)
# param_1 = obj.next(n)
| [
"eraxer0165749@gmail.com"
] | eraxer0165749@gmail.com |
c549025fe6b58ffe1ccc0b22996cabc8736f8306 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/4499367/snippet.py | 5581424b74f80c38427bb49d1be8aff71949ed4c | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 6,695 | py | class ReloaderEventHandler(FileSystemEventHandler):
"""
Listen for changes to modules within the Django project
On change, reload the module in the Python Shell
Custom logic required to reload django models.py modules
Due to the singleton AppCache, which caches model references.
For those models files, we must clear and repopulate the AppCache
"""
def __init__(self, *args, **kwargs):
self.project_root = kwargs.pop('project_root', None)
self.shell_globals = kwargs.pop('shell_globals', None)
self.model_globals = kwargs.pop('model_globals', None)
super(ReloaderEventHandler, self).__init__(*args, **kwargs)
def dispatch(self, event):
event_path = event.src_path
path, file_extension = os.path.splitext(event_path)
if all([
file_extension == '.py',
'shell_plus' not in path,
self.project_root in path
]):
return super(ReloaderEventHandler, self).dispatch(event)
    def on_created(self, event):
        """Called by dispatch when a new file appears in the project;
        reloads the corresponding module into the shell."""
        super(ReloaderEventHandler, self).on_created(event)
        self._force_reload(event)
    def on_modified(self, event):
        """
        Called by dispatch on modification of a file in the Django
        project; reloads the corresponding module into the shell.
        """
        super(ReloaderEventHandler, self).on_modified(event)
        self._force_reload(event)
def _force_reload(self, event):
"""
Reload the altered module
models.py files and all other python files are handled differently
This is because models are cached by Django in a singleton
We need to clear this singleton to properly reload
"""
cleaned_path = self._clean_path(event.src_path)
path_components = cleaned_path.split(os.path.sep)
if path_components[-1] == 'models':
self._reload_models_module(path_components[-2])
# This redundant call bizarrely seems necessary
# Issue exists around recompiling models.pyc file on 1st attempt
# Subsequent reloads always work
# time.sleep(1)
self._reload_models_module(path_components[-2])
else:
self._reload_module(path_components)
def _clean_path(self, path):
"""Remove the leading project path"""
project_root = self.project_root if self.project_root.endswith('/') else "{}/".format(self.project_root)
path_from_project_root = path.replace(project_root, '')
# Remove trailing ".py" from module for importing purposes
return os.path.splitext(path_from_project_root)[0]
def _reload_module(self, path_components):
"""
Wrapper for __builtin__ reload() function
In addition to reloading the module,
we reset the associated classes in the global scope of the shell.
Consequently, we don't have to manaully reimport (i.e. 'from app import MyClass')
Instead, MyClass will have changed for us automagically
More interestingly, we also dynamically update the classes
of existing object instances in the global scope with `_update_class_instances`.
## In a Shell session
obj = MyKLS()
obj.getchar() --> 'a'
## While still in the Shell,
### We change the function definition of getchar() in the filesytem to return 'b'
### In our Shell, we will see that
obj.getchar() --> 'b'
This behavior is very experimental and possibly dangerous but powerful
Cuts down time and frustration during pdb debugging
"""
# We attempt to import the module from the project root
# This SHOULD be agnostic of app/project structure
while True:
try:
module = importlib.import_module('.'.join(path_components))
except ImportError:
path_components.pop(0)
if not path_components:
return
else:
break
reload(module)
# Reload objects into the global scope
# This has the potential to cause namespace collisions
# The upside is that we don't have to reimport (i.e. from module import ObjName)
for attr in dir(module):
if (
not(attr.startswith('__') and attr.endswith('__'))
and self.shell_globals.get(attr)
):
self.shell_globals[attr] = getattr(module, attr)
self._update_class_instances(module, attr)
def _reload_models_module(self, app_name):
"""
Reload Django models
Based on http://stackoverflow.com/questions/890924/how-do-you-reload-a-django-model-module-using-the-interactive-interpreter-via-m
"""
curdir = os.getcwd()
cache = AppCache()
for app in cache.get_apps():
f = app.__file__
if f.startswith(curdir) and f.endswith('.pyc'):
try:
os.remove(f)
except Exception:
pass
__import__(app.__name__)
reload(app)
cache.app_store = SortedDict()
cache.app_models = SortedDict()
cache.app_errors = {}
cache.handled = {}
cache.loaded = False
# Reset app's models in global scope
# Using a dictionary here instead of cache.get_models(app_name)
# The latter does not seem to work (look into that)
reimported_app = importlib.import_module("{}.models".format(app_name))
model_names = self.model_globals[app_name]
for model_name in model_names:
self.shell_globals[model_name] = getattr(reimported_app, model_name)
self._update_class_instances(reimported_app, model_name)
def _update_class_instances(self, module, attr):
"""
Reset the __class__ of all instances whoses
class has been reloaded into the shell
This allows us to do CRAZY things such as
effectively manipulate an instance's source code
while inside a debugger
"""
module_obj = getattr(module, attr)
if inspect.isclass(module_obj):
for obj in self.shell_globals.values():
# hasattr condition attempts to handle old style classes
# The class __str__ check may not be ideal but it seems to work
# The one exception being if you changes the __str__ method
# of the reloaded object. That edge case is not handled
if hasattr(obj, '__class__') and str(obj.__class__) == str(module_obj):
obj.__class__ = module_obj | [
"gistshub@gmail.com"
] | gistshub@gmail.com |
b9dc54cabc677d71d8007287459dced2f2617cad | 099b57613250ae0a0c3c75cc2a9b8095a5aac312 | /一些总结/测试文件.py | a6b7753c5aaf1e6131ed711b632a56cb8e88a6b0 | [] | no_license | MitsurugiMeiya/Leetcoding | 36e41c8d649b777e5c057a5241007d04ad8f61cd | 87a6912ab4e21ab9be4dd6e90c2a6f8da9c68663 | refs/heads/master | 2022-06-17T19:48:41.692320 | 2020-05-13T16:45:54 | 2020-05-13T16:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def test(self,head):
while head.next != None:
print(head.val)
head = head.next
if __name__ == "__main__":
solution = Solution()
solution.test()
| [
"yifu3@ualberta.ca"
] | yifu3@ualberta.ca |
96485320f651c95c1a482e4192a688a2b0660b02 | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/2577781/snippet.py | e33c008d1679fcecad43252d5649b7c362333a02 | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 2,201 | py | #!/usr/bin/env python
# Thread pool based on: http://code.activestate.com/recipes/577187-python-thread-pool/
from queue import Queue
from threading import Thread
from functools import partial
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado import gen
from tornado.ioloop import IOLoop
import himitsu
def make_hash(text):
b = himitsu.Bcrypt()
return b.encode(text)
class WorkerThread(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
self.daemon = True
self.start()
def run(self):
while True:
func, args, kwargs, callback = self.queue.get()
try:
result = func(*args, **kwargs)
if callback is not None:
IOLoop.instance().add_callback(partial(callback, result))
except Exception as e:
print(e)
self.queue.task_done()
class ThreadPool(object):
def __init__(self, num_threads):
self.queue = Queue()
for _ in range(num_threads):
WorkerThread(self.queue)
def add_task(self, func, args=(), kwargs={}, callback=None):
self.queue.put((func, args, kwargs, callback))
def wait_completion(self):
self.queue.join()
class BaseHandler(tornado.web.RequestHandler):
@property
def pool(self):
if not hasattr(self.application, 'pool'):
self.application.pool = ThreadPool(20)
return self.application.pool
class IndexHandler(BaseHandler):
@tornado.web.asynchronous
@gen.engine
def get(self):
result = yield gen.Task(
self.pool.add_task, make_hash, ('Test',)
)
self.write(result)
self.finish()
def main():
try:
tornado.options.parse_command_line()
application = tornado.web.Application([
(r'/', IndexHandler)
], debug=True)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print('Exit')
if __name__ == '__main__':
main()
| [
"42325807+dockerizeme@users.noreply.github.com"
] | 42325807+dockerizeme@users.noreply.github.com |
e7f286d32115897aa45a6bdb8da562cfd9ae6f5d | 1eab574606dffb14a63195de994ee7c2355989b1 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/bmacmappedip_dfnlz21lbnrzl2jnywnnyxbwzwrjca.py | f27128cbf257a11174bdaf3fe003acc13969aa81 | [
"MIT"
] | permissive | steiler/ixnetwork_restpy | 56b3f08726301e9938aaea26f6dcd20ebf53c806 | dd7ec0d311b74cefb1fe310d57b5c8a65d6d4ff9 | refs/heads/master | 2020-09-04T12:10:18.387184 | 2019-11-05T11:29:43 | 2019-11-05T11:29:43 | 219,728,796 | 0 | 0 | null | 2019-11-05T11:28:29 | 2019-11-05T11:28:26 | null | UTF-8 | Python | false | false | 5,867 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class BMacMappedIp(Base):
"""This objects holds all the IP (V4/V6) addresses associated with a B-MAC of an ethernet segment.
The BMacMappedIp class encapsulates a list of bMacMappedIp resources that is be managed by the user.
A list of resources can be retrieved from the server using the BMacMappedIp.find() method.
The list can be managed by the user by using the BMacMappedIp.add() and BMacMappedIp.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'bMacMappedIp'
def __init__(self, parent):
super(BMacMappedIp, self).__init__(parent)
@property
def Enabled(self):
"""If true then this IP is associated with the B-MAC of the ethernet segment. Default value is false.
Returns:
bool
"""
return self._get_attribute('enabled')
@Enabled.setter
def Enabled(self, value):
self._set_attribute('enabled', value)
@property
def IpAddress(self):
"""IP address value is given here depending on the IP Type. Default value is all zero.
Returns:
str
"""
return self._get_attribute('ipAddress')
@IpAddress.setter
def IpAddress(self, value):
self._set_attribute('ipAddress', value)
@property
def IpType(self):
"""Drop down of {IPv4, IPv6}. If IPv4 is selected then IPv4 address is used. If IPv6 is selected then IPv6 address is used. Default value is IPv4.
Returns:
str(ipV4|ipV6)
"""
return self._get_attribute('ipType')
@IpType.setter
def IpType(self, value):
self._set_attribute('ipType', value)
def update(self, Enabled=None, IpAddress=None, IpType=None):
"""Updates a child instance of bMacMappedIp on the server.
Args:
Enabled (bool): If true then this IP is associated with the B-MAC of the ethernet segment. Default value is false.
IpAddress (str): IP address value is given here depending on the IP Type. Default value is all zero.
IpType (str(ipV4|ipV6)): Drop down of {IPv4, IPv6}. If IPv4 is selected then IPv4 address is used. If IPv6 is selected then IPv6 address is used. Default value is IPv4.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def add(self, Enabled=None, IpAddress=None, IpType=None):
"""Adds a new bMacMappedIp node on the server and retrieves it in this instance.
Args:
Enabled (bool): If true then this IP is associated with the B-MAC of the ethernet segment. Default value is false.
IpAddress (str): IP address value is given here depending on the IP Type. Default value is all zero.
IpType (str(ipV4|ipV6)): Drop down of {IPv4, IPv6}. If IPv4 is selected then IPv4 address is used. If IPv6 is selected then IPv6 address is used. Default value is IPv4.
Returns:
self: This instance with all currently retrieved bMacMappedIp data using find and the newly added bMacMappedIp data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the bMacMappedIp data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Enabled=None, IpAddress=None, IpType=None):
"""Finds and retrieves bMacMappedIp data from the server.
All named parameters support regex and can be used to selectively retrieve bMacMappedIp data from the server.
By default the find method takes no parameters and will retrieve all bMacMappedIp data from the server.
Args:
Enabled (bool): If true then this IP is associated with the B-MAC of the ethernet segment. Default value is false.
IpAddress (str): IP address value is given here depending on the IP Type. Default value is all zero.
IpType (str(ipV4|ipV6)): Drop down of {IPv4, IPv6}. If IPv4 is selected then IPv4 address is used. If IPv6 is selected then IPv6 address is used. Default value is IPv4.
Returns:
self: This instance with matching bMacMappedIp data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of bMacMappedIp data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the bMacMappedIp data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
280180605538ca9ee2017ad7f97de20f1a449c35 | 74549d7c57b4746ac2a9c275aa12bfc577b0e8af | /funny_string.py | ade6aa44436eeb9690be1bfa5f29809cbb0e93ea | [] | no_license | abidkhan484/hackerrank_solution | af9dbf6ec1ead920dc18df233f40db0c867720b4 | b0a98e4bdfa71a4671999f16ab313cc5c76a1b7a | refs/heads/master | 2022-05-02T11:13:29.447127 | 2022-04-13T03:02:59 | 2022-04-13T03:02:59 | 99,207,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | #!/bin/python3
n = int(input().strip())
for i in range(n):
s = input().strip()
# tmp list, to get the consequtive subtraction of the s list(string)
s = list(s)
tmp = []
l = len(s)
for j in range(1, l):
temp = abs(ord(s[j]) - ord(s[j-1]))
tmp.append(temp)
# now reverse operation occer and check tmp with the reverse,
# consequtive subtractions
s.reverse()
for j in range(1, l):
temp = abs(ord(s[j]) - ord(s[j-1]))
if temp != tmp[j-1]:
break
# if j is checked till the last, then print funny
if j == l-1:
print("Funny")
else:
print("Not Funny")
| [
"abidkhan484@gmail.com"
] | abidkhan484@gmail.com |
ce7b7d741e6cdf375737b61908957bd3e62f89a9 | 7b2a3ea853dc44aea204f02abedaad6a2029f4ff | /preprocess_4mem2d.py | 4511128676f7bf408c9f199ba12a43ca4469807f | [] | no_license | NoisyLeon/SW4Py | 7d45503282dc988b5f886c039706bd79fdd6b339 | 7029f18eb526bcb46b4aa244da1e088ca57a56aa | refs/heads/master | 2020-12-22T14:57:11.265397 | 2016-12-20T18:27:18 | 2016-12-20T18:27:18 | 56,792,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,742 | py | import vmodel, stations
# SLst=stations.StaLst()
# # SLst.HomoStaLst(xmin=20000, Nx=149, dx=20000, ymin=20000, Ny=29, dy=20000)
# SLst.LineStaLst(xmin=300000, Nx=133, dx=20000, y=300000)
# SLst.WriteStaList('/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/station_4mem2d.lst')
# # SLst.Write2Input(infname='/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/single_ring_basin.in')
# SLst.Write2Input(infname='/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/single_homo_basin.in')
# #
rmodel=vmodel.rModel()
rmodel.ak135(zmin=0., zmax=100., ni=3001, nj=601, hh=1000., hv=1000., CPS=True)
rmodel.checkInput('/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/single_staircase_basin.in')
# rmodel.checkInput('/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/single_homo_basin.in')
# # rmodel.CylinderCosineAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='vs', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderCosineAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='vp', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderCosineAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='rho', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderCosineAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='vs', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderCosineAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='vp', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderCosineAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='rho', zmin=0, zmax=20000, nb=2)
# rmodel.CynlinderRingBasin(x0=1100000, y0=300000, zmax=4000., Rmax=200000., vs=2000., outfname='./cpsin.txt')
# rmodel.CylinderHomoAnomaly(x0=1100000, y0=300000, zmax=4000., R=200000., dm=-0.1)
rmodel.CylinderLinearDepthAnomalyAll(x0=1100000, y0=300000, R=100000, vt=2000., vb=3000., zmax=5000, zmin=0, nb=None, outfname='cpsin_staircase.txt')
# rmodel.CylinderHomoSediment(x0=1100000, y0=300000, R=100000, zmax=3000, vs=2000.)
#
# # rmodel.CylinderHomoAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='vs', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderHomoAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='vp', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderHomoAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='rho', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderHomoAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='vs', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderHomoAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='vp', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderHomoAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='rho', zmin=0, zmax=20000, nb=2)
# rmodel.writeVprofile('./cpsinput_4km_0.3.txt')
#
rmodel.write('/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/single_staircase_basin.rfile')
#
| [
"lili.feng@colorado.edu"
] | lili.feng@colorado.edu |
9fedd6c3390c1fce9081e9c32411b6ec4b73d856 | ba0a2b0d2d1534443ea34320675aadfa378457b6 | /String/Q843_Guess the Word.py | 14717252223355cffe90ae93b5a66190717a6e68 | [] | no_license | Luolingwei/LeetCode | 73abd58af116f3ec59fd6c76f662beb2a413586c | 79d4824879d0faed117eee9d99615cd478432a14 | refs/heads/master | 2021-08-08T17:45:19.215454 | 2021-06-17T17:03:15 | 2021-06-17T17:03:15 | 152,186,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,444 | py |
# 思路1: 每次随机选择一个word, 得到score, 留下list中和当前guess match结果为score的word
# 思路2: 因为最有可能的情况是一个字符都不match, 概率为 (25/26)^6 = 80%, 所以最有可能的情况是首先guess到match score=0,
# 可以从candidates中选择和其他word的 match score = 0 数量最小的word, 这样candidates 集合减小最快
# O(N^2)
# 思路3: 根据char frequency的总和 选择family 最大的那个word, 这样一旦guess到match score=0, 所有和这个word有重叠的candidate都会被剔除掉
# 理论上guess所有位置freq最大char组成的word, 可以一次剔除最多的candidate, 但是本题只能在wordlist中guess
# O(N)
# """
# This is Master's API interface.
# You should not implement it, or speculate about its implementation
# """
# class Master:
# def guess(self, word: str) -> int:
import random
from collections import Counter
class Solution:
def findSecretWord1(self, wordlist, master):
def match(x, y):
return sum(x[i] == y[i] for i in range(len(x)))
for _ in range(10):
word = random.choice(wordlist)
score = master.guess(word)
wordlist = [w for w in wordlist if match(w, word) == score]
def findSecretWord2(self, wordlist, master):
def match(x, y):
return sum(x[i] == y[i] for i in range(len(x)))
def select(wordlist):
count = Counter()
for i in range(len(wordlist)):
for j in range(len(wordlist)):
if j != i and match(wordlist[i], wordlist[j]) == 0:
count[i] += 1
return min(range(len(wordlist)), key=lambda x: count[x])
for _ in range(10):
word = wordlist[select(wordlist)]
score = master.guess(word)
wordlist = [w for w in wordlist if match(w, word) == score]
def findSecretWord3(self, wordlist, master) -> None:
def match(x, y):
return sum(x[i] == y[i] for i in range(len(x)))
def select(wordlist):
freqs = [Counter(w[i] for w in wordlist) for i in range(6)]
word = max(wordlist, key=lambda x: sum(freqs[i][c] for i, c in enumerate(x)))
return word
for _ in range(10):
word = select(wordlist)
score = master.guess(word)
wordlist = [w for w in wordlist if match(w, word) == score] | [
"564258080@qq.com"
] | 564258080@qq.com |
40bc42df545f07ac7d1662bc6dfebf1fdf0c95a7 | 107e869bc298c74bf2418b53b630ca57c00bc68b | /src/repro/model/densenet201.py | 977eb43ebc36e871f5c9e9ee61ac995971421ec5 | [
"BSD-3-Clause"
] | permissive | bouthilx/repro | 699f1f635872507bd054b57ec03140f998a9f7d1 | 611734e4eddd6a76dd4c1e7114a28a634a2a75c1 | refs/heads/dev | 2020-04-18T03:26:55.670831 | 2019-01-30T17:56:21 | 2019-01-30T17:56:21 | 167,199,050 | 0 | 0 | BSD-3-Clause | 2019-01-23T19:48:35 | 2019-01-23T14:44:15 | Python | UTF-8 | Python | false | false | 283 | py | from repro.model.densenet import DenseNet, distribute
def build(input_size, num_classes, distributed=0):
model = DenseNet(input_size=input_size, num_init_features=64, growth_rate=32,
block_config=(6, 12, 48, 32))
return distribute(model, distributed)
| [
"xavier.bouthillier@umontreal.ca"
] | xavier.bouthillier@umontreal.ca |
b64d25c8ad3b2647cc130320042a122d807f1f72 | 50518b396163f9ee07e762cc30ec86a49c35782c | /DACON_LG_블럭장난감제조공정/baseline/module/simulator.py | 57b0815bd4d8b53ad80621dd2ec89028bb09e55e | [] | no_license | madfalc0n/Dacon_AI | 0185fe4cfd7ba9716b7420d7f464f6901be7382e | 68f9aaab2a7fcc0e634bce67199d8b22049c4a09 | refs/heads/master | 2023-03-23T08:39:34.910144 | 2021-03-09T10:28:05 | 2021-03-09T10:28:05 | 274,060,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,772 | py | import os
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
from pathlib import Path
class Simulator:
def __init__(self):
self.sample_submission = pd.read_csv(os.path.join(Path(__file__).resolve().parent, 'sample_submission.csv'))
self.max_count = pd.read_csv(os.path.join(Path(__file__).resolve().parent, 'max_count.csv'))
self.stock = pd.read_csv(os.path.join(Path(__file__).resolve().parent, 'stock.csv'))
order = pd.read_csv(os.path.join(Path(__file__).resolve().parent, 'order.csv'), index_col=0)
order.index = pd.to_datetime(order.index)
self.order = order
def get_state(self, data):
if 'CHECK' in data:
return int(data[-1])
elif 'CHANGE' in data:
return int(data[-1])
else:
return np.nan
def cal_schedule_part_1(self, df):
columns = ['PRT_1', 'PRT_2', 'PRT_3', 'PRT_4']
df_set = df[columns]
df_out = df_set * 0
p = 0.985
dt = pd.Timedelta(days=23)
end_time = df_out.index[-1]
for time in df_out.index:
out_time = time + dt
if end_time < out_time:
break
else:
for column in columns:
set_num = df_set.loc[time, column]
if set_num > 0:
out_num = np.sum(np.random.choice(2, set_num, p=[1-p, p]))
df_out.loc[out_time, column] = out_num
df_out['MOL_1'] = 0.0
df_out['MOL_2'] = 0.0
df_out['MOL_3'] = 0.0
df_out['MOL_4'] = 0.0
df_out['BLK_1'] = 0.0
df_out['BLK_2'] = 0.0
df_out['BLK_3'] = 0.0
df_out['BLK_4'] = 0.0
return df_out
def cal_schedule_part_2(self, df, line='A'):
if line == 'A':
columns = ['Event_A', 'MOL_A']
elif line == 'B':
columns = ['Event_B', 'MOL_B']
else:
columns = ['Event_A', 'MOL_A']
schedule = df[columns].copy()
schedule['state'] = 0
schedule['state'] = schedule[columns[0]].apply(lambda x: self.get_state(x))
schedule['state'] = schedule['state'].fillna(method='ffill')
schedule['state'] = schedule['state'].fillna(0)
schedule_process = schedule.loc[schedule[columns[0]]=='PROCESS']
df_out = schedule.drop(schedule.columns, axis=1)
df_out['PRT_1'] = 0.0
df_out['PRT_2'] = 0.0
df_out['PRT_3'] = 0.0
df_out['PRT_4'] = 0.0
df_out['MOL_1'] = 0.0
df_out['MOL_2'] = 0.0
df_out['MOL_3'] = 0.0
df_out['MOL_4'] = 0.0
p = 0.975
times = schedule_process.index
for i, time in enumerate(times):
value = schedule.loc[time, columns[1]]
state = int(schedule.loc[time, 'state'])
df_out.loc[time, 'PRT_'+str(state)] = -value
if i+48 < len(times):
out_time = times[i+48]
df_out.loc[out_time, 'MOL_'+str(state)] = value*p
df_out['BLK_1'] = 0.0
df_out['BLK_2'] = 0.0
df_out['BLK_3'] = 0.0
df_out['BLK_4'] = 0.0
return df_out
def cal_stock(self, df, df_order):
df_stock = df * 0
blk2mol = {}
blk2mol['BLK_1'] = 'MOL_1'
blk2mol['BLK_2'] = 'MOL_2'
blk2mol['BLK_3'] = 'MOL_3'
blk2mol['BLK_4'] = 'MOL_4'
cut = {}
cut['BLK_1'] = 506
cut['BLK_2'] = 506
cut['BLK_3'] = 400
cut['BLK_4'] = 400
p = {}
p['BLK_1'] = 0.851
p['BLK_2'] = 0.901
blk_diffs = []
for i, time in enumerate(df.index):
month = time.month
if month == 4:
p['BLK_3'] = 0.710
p['BLK_4'] = 0.700
elif month == 5:
p['BLK_3'] = 0.742
p['BLK_4'] = 0.732
elif month == 6:
p['BLK_3'] = 0.759
p['BLK_4'] = 0.749
else:
p['BLK_3'] = 0.0
p['BLK_4'] = 0.0
if i == 0:
df_stock.iloc[i] = df.iloc[i]
else:
df_stock.iloc[i] = df_stock.iloc[i-1] + df.iloc[i]
for column in df_order.columns:
val = df_order.loc[time, column]
if val > 0:
mol_col = blk2mol[column]
mol_num = df_stock.loc[time, mol_col]
df_stock.loc[time, mol_col] = 0
blk_gen = int(mol_num*p[column]*cut[column])
blk_stock = df_stock.loc[time, column] + blk_gen
blk_diff = blk_stock - val
df_stock.loc[time, column] = blk_diff
blk_diffs.append(blk_diff)
return df_stock, blk_diffs
def subprocess(self, df):
out = df.copy()
column = 'time'
out.index = pd.to_datetime(out[column])
out = out.drop([column], axis=1)
out.index.name = column
return out
def add_stock(self, df, df_stock):
df_out = df.copy()
for column in df_out.columns:
df_out.iloc[0][column] = df_out.iloc[0][column] + df_stock.iloc[0][column]
return df_out
def order_rescale(self, df, df_order):
df_rescale = df.drop(df.columns, axis=1)
dt = pd.Timedelta(hours=18)
for column in ['BLK_1', 'BLK_2', 'BLK_3', 'BLK_4']:
for time in df_order.index:
df_rescale.loc[time+dt, column] = df_order.loc[time, column]
df_rescale = df_rescale.fillna(0)
return df_rescale
def cal_score(self, blk_diffs):
# Block Order Difference
blk_diff_m = 0
blk_diff_p = 0
for item in blk_diffs:
if item < 0:
blk_diff_m = blk_diff_m + abs(item)
if item > 0:
blk_diff_p = blk_diff_p + abs(item)
score = blk_diff_m + blk_diff_p
return score
def get_score(self, df):
df = self.subprocess(df)
out_1 = self.cal_schedule_part_1(df)
out_2 = self.cal_schedule_part_2(df, line='A')
out_3 = self.cal_schedule_part_2(df, line='B')
out = out_1 + out_2 + out_3
out = self.add_stock(out, self.stock)
order = self.order_rescale(out, self.order)
out, blk_diffs = self.cal_stock(out, order)
score = self.cal_score(blk_diffs)
return score, out
| [
"chadool116@naver.com"
] | chadool116@naver.com |
2c6f87182a68ad85ede0cd34bc337b5fcded27ab | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/twentyPercent/rank_4eig_B.py | 02617609224e47a8260dfad0e57980bf1975417d | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '4eig.csv'
identifier = 'B'
coefFrac = 0.2
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/twentyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/twentyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
# Standardize inut DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
c6bf383d8e5a35af168c40a96a2535f5a4986f6c | 306afd5282d9c24d58297478a1728a006c29e57e | /lintcode/lintcode_040_Implement_Queue by_Two_Stacks.py | 9f608da5577673762929157d354418a80e4c236b | [] | no_license | ytatus94/Leetcode | d2c1fe3995c7a065139f772569485dc6184295a9 | 01ee75be4ec9bbb080f170cb747f3fc443eb4d55 | refs/heads/master | 2023-06-08T17:32:34.439601 | 2023-05-29T04:33:19 | 2023-05-29T04:33:19 | 171,921,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | class MyQueue:
def __init__(self):
# do intialization if necessary
self.stack1 = []
self.stack2 = [] # 用來暫時放 stack1 的東西
"""
@param: element: An integer
@return: nothing
"""
def push(self, element):
# write your code here
self.stack1.append(element)
"""
@return: An integer
"""
def pop(self):
# write your code here
# 只看 stack1 就好
# 把 stack1 的內容都拿出來放到 stack2
# 這樣在 stack2 內的順序正好和 stack1 顛倒
while len(self.stack1) > 0:
self.stack2.append(self.stack1.pop())
res = self.stack2.pop() # stack2 的最後一個就是 stack1 的第一個
# 再把全部塞回 stack1
while len(self.stack2) > 0:
self.stack1.append(self.stack2.pop())
return res
"""
@return: An integer
"""
def top(self):
# write your code here
return self.stack1[0] # 題目說 top 傳回的是第一個元素
| [
"noreply@github.com"
] | ytatus94.noreply@github.com |
d4295b1a86339eafa1623e827e5b977d4aa665b6 | 10c26e25f7da2289d50b1138b7da48bf9a02d42f | /Oj/users/views.py | 2058ad4470ff413c387bbdd003f3b5c7b3b3942e | [] | no_license | ParitoshAggarwal/OJ | e1392a02dd95d42b4d72ba69b891db9df5e406ad | 1a4acb5e620b0575d744fd8e4c13148062d1670c | refs/heads/master | 2022-10-19T21:18:02.512008 | 2017-12-27T06:53:46 | 2017-12-27T06:53:46 | 97,516,099 | 0 | 1 | null | 2022-10-13T00:05:44 | 2017-07-17T19:50:06 | JavaScript | UTF-8 | Python | false | false | 3,067 | py | from django.contrib.auth.models import User
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import View
from .modelForms import CoderForm, UserForm
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.views.generic.edit import UpdateView
from .models import Coder
from django.urls import reverse
class UserFormView(View):
form_class1 = UserForm
form_class2 = CoderForm
template_name = 'users/reg_form.html'
def get(self, request):
form1 = self.form_class1(None)
form2 = self.form_class2(None)
return render(request, self.template_name,
{'form1' : form1,'form2':form2})
def post(self, request):
form1=self.form_class1(request.POST)
form2 = self.form_class2(request.POST)
if form1.is_valid() and form2.is_valid():
user = form1.save(commit=False)
coder = form2.save(commit=False)
username = form1.cleaned_data['username']
password = form1.cleaned_data['password']
email = form1.clean_email()
user.set_password(password)
user.save()
coder.user = user
coder.save()
user = authenticate(username = username,password = password)
if user is not None:
if user.is_active:
login(request, user)
return redirect('/')
return render(request, self.template_name, {'form1': form1, 'form2': form2})
class MainPageView(View):
template_name = 'main_page.html'
def get(self, request):
user = None
if request.user.is_authenticated():
user = request.user
return render(request, self.template_name, {'user': user})
class LoginPageView(View):
template_name = 'users/login_page.html'
def get(self,request):
return render(request,self.template_name)
def post(self,request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username,password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect('/')
return render(request,self.template_name)
class LogoutPageView(View):
template_name = 'main_page.html'
def post(self, request):
logout(request)
return redirect('/')
class DetailPageView(View):
template_name = 'users/detail.html'
def get(self,request, user):
userobj = get_object_or_404(User, username=user)
return render(request,self.template_name,{'user': userobj})
# edit profile fields other than user id and password
class UserUpdate(UpdateView):
model = Coder
fields = ['institution','city','state','resume']
def get_object(self,*args,**kwargs):
user = self.request.user
return user.coder
def get_success_url(self, *args, **kwargs):
return reverse("home")
| [
"paritoshmait@gmail.com"
] | paritoshmait@gmail.com |
da83acb376af17ee4889f2e2866bb87ad92bc1a7 | 1220f32fbf835e7a853ee7ccc8ca13c215bc79cf | /Kivy_tutorial_files/Kivy_App_Tutorial_00/Box&Buttons/ButtonWidget_06.py | 03565d1cbf7eaf290a6f27c19ecf4a39a966067a | [] | no_license | CyborgVillager/Gui_Tutorial | 6775fc570427b424dc38b56cdaad2c17ef3d178d | 9f39efc3d62bcb2f22bbf1fa9d23ad96a04cc412 | refs/heads/master | 2022-11-06T10:14:44.006658 | 2020-01-11T15:35:14 | 2020-01-11T15:35:14 | 232,301,052 | 1 | 1 | null | 2022-10-21T05:28:32 | 2020-01-07T10:31:01 | Python | UTF-8 | Python | false | false | 370 | py | from kivy.lang import Builder
from kivy.base import runTouchApp
runTouchApp(Builder.load_string("""
Label:
Button:
text:'Jonathan'
font_size:32
color:222, 233, 7, 0.96
size:250,200
pos:50,100
Button:
text:'Joshua'
font_size:26
color:.8,.1,0,1
size:200,100
pos:75,350
""")) | [
"almawijonathan@gmail.com"
] | almawijonathan@gmail.com |
223827bb659d011399d16c21781ff4f7c4693e37 | 8253a563255bdd5797873c9f80d2a48a690c5bb0 | /configurationengine/source/plugins/symbian/ConeHCRPlugin/hcrplugin/tests/unittest_reader.py | bbb2421193268f7d267aa23848823b90add748c2 | [] | no_license | SymbianSource/oss.FCL.sftools.depl.swconfigmdw | 4e6ab52bf564299f1ed7036755cf16321bd656ee | d2feb88baf0e94da760738fc3b436c3d5d1ff35f | refs/heads/master | 2020-03-28T10:16:11.362176 | 2010-11-06T14:59:14 | 2010-11-06T14:59:14 | 73,009,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,883 | py | #
# Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved.
# This component and the accompanying materials are made available
# under the terms of "Eclipse Public License v1.0"
# which accompanies this distribution, and is available
# at the URL "http://www.eclipse.org/legal/epl-v10.html".
#
# Initial Contributors:
# Nokia Corporation - initial contribution.
#
# Contributors:
#
# Description:
#
import os, unittest
from testautomation.utils import hex_to_bindata
from hcrplugin.hcr_reader import HcrReader
from hcrplugin import hcr_exceptions
from hcrplugin.hcrrepository import HcrRecord
class TestHcrReader(unittest.TestCase):
def setUp(self):
self.reader = HcrReader()
def test_read_repo_with_invalid_record_section_size(self):
# Record section size: 4 * 20 = 80
# LSD offset: 32 + 80 = 112
# LSD size: 0
data = [
# Header
# Record count should be 4, but is 6 here
"48435266 0200 0300 06000000 70000000",
"00000000 000000000000000000000000",
# Record section
"01000000 01000000 08000000 0000 0000 01000000", # bool
"02000000 01000000 04000000 0000 0000 85FFFFFF", # int8
"03000000 01000000 40000000 0000 0000 CC000000", # uint8
"01000000 02000000 02000000 0000 0000 91CBFFFF", # int16
]
data = ''.join(map(lambda x: hex_to_bindata(x), data))
try:
self.reader.parse_repository_from_bindata(data)
self.fail("Expected exception not raised")
except hcr_exceptions.InvalidHcrDataSizeError:
pass
def test_read_repo_with_invalid_lsd_section_size(self):
# Record section size: 4 * 20 = 80
# LSD offset: 32 + 80 = 112
# LSD size: 0
data = [
# Header
# LSD section size should be 0, but is 40 here
"48435266 0200 0300 04000000 70000000",
"28000000 000000000000000000000000",
# Record section
"01000000 01000000 08000000 0000 0000 01000000", # bool
"02000000 01000000 04000000 0000 0000 85FFFFFF", # int8
"03000000 01000000 40000000 0000 0000 CC000000", # uint8
"01000000 02000000 02000000 0000 0000 91CBFFFF", # int16
]
data = ''.join(map(lambda x: hex_to_bindata(x), data))
try:
self.reader.parse_repository_from_bindata(data)
self.fail("Expected exception not raised")
except hcr_exceptions.InvalidHcrDataSizeError:
pass
def test_read_repo_with_invalid_lsd_section_offset(self):
# Record section size: 2 * 20 = 40
# LSD offset: 32 + 40 = 72
# LSD size: 8 + 8 = 16
data = [
# Header, LSD offset here is 60
"48435266 0200 0300 02000000 3C000000",
"10000000 000000000000000000000000",
# Record section
"01000000 01000000 00000001 0000 0800 00000000", # int64, lsd pos = (0, 8)
"02000000 01000000 00000002 0000 0800 08000000", # uint64, lsd pos = (8, 8)
# LSD section
"FC73 978B B823 D47F", # 8 bytes
"14FD 32B4 F410 2295", # 8 bytes
]
data = ''.join(map(lambda x: hex_to_bindata(x), data))
try:
self.reader.parse_repository_from_bindata(data)
self.fail("Expected exception not raised")
except hcr_exceptions.InvalidLsdSectionOffsetError:
pass
def test_read_repo_with_invalid_lsd_pos_in_record(self):
# Record section size: 2 * 20 = 40
# LSD offset: 32 + 40 = 72
# LSD size: 8 + 8 = 16
data = [
# Header
"48435266 0200 0300 02000000 48000000",
"10000000 000000000000000000000000",
# Record section
"01000000 01000000 00000001 0000 0800 00000000", # int64, lsd pos = (0, 8)
"02000000 01000000 00000002 0000 0800 0C000000", # uint64, lsd pos = (12, 8), should be (8, 8)
# LSD section
"FC73 978B B823 D47F", # 8 bytes
"14FD 32B4 F410 2295", # 8 bytes
]
data = ''.join(map(lambda x: hex_to_bindata(x), data))
try:
self.reader.parse_repository_from_bindata(data)
self.fail("Expected exception not raised")
except hcr_exceptions.InvalidRecordLsdPositionError:
pass
def test_read_repo_with_invalid_record_value_type(self):
# Record section size: 2 * 20 = 40
# LSD offset: 32 + 40 = 72
# LSD size: 8 + 8 = 16
data = [
# Header
"48435266 0200 0300 02000000 48000000",
"10000000 000000000000000000000000",
# Record section
"01000000 01000000 00000001 0000 0800 00000000", # int64, lsd pos = (0, 8)
"02000000 01000000 DEADBEEF 0000 0800 0C000000", # invalid type
# LSD section
"FC73 978B B823 D47F", # 8 bytes
"14FD 32B4 F410 2295", # 8 bytes
]
data = ''.join(map(lambda x: hex_to_bindata(x), data))
try:
self.reader.parse_repository_from_bindata(data)
self.fail("Expected exception not raised")
except hcr_exceptions.InvalidRecordValueTypeError:
pass
def _run_test_read_record_with_invalid_lsd_size(self, value_type, lsd_data):
try:
self.reader.parse_record_value_from_lsd_bindata(value_type, lsd_data)
self.fail("Expected exception not raised")
except hcr_exceptions.InvalidRecordLsdPositionError:
pass
def test_read_record_with_invalid_lsd_size_int64(self):
data = hex_to_bindata("0000 0000 0000 00")
self._run_test_read_record_with_invalid_lsd_size(HcrRecord.VALTYPE_INT64, data)
def test_read_record_with_invalid_lsd_size_uint64(self):
data = hex_to_bindata("0000 0000 0000 00")
self._run_test_read_record_with_invalid_lsd_size(HcrRecord.VALTYPE_UINT64, data)
def test_read_record_with_invalid_lsd_size_arrayint32(self):
data = hex_to_bindata("0000 0000 0000 00")
self._run_test_read_record_with_invalid_lsd_size(HcrRecord.VALTYPE_ARRAY_INT32, data)
def test_read_record_with_invalid_lsd_size_arrayuint32(self):
data = hex_to_bindata("0000 0000 0000 00")
self._run_test_read_record_with_invalid_lsd_size(HcrRecord.VALTYPE_ARRAY_UINT32, data)
def test_read_record_with_invalid_data_size(self):
try:
self.reader.parse_record_from_bindata('1234')
self.fail("Parsing invalid record data succeeded!")
except hcr_exceptions.HcrReaderError:
pass
def test_read_signed_integer_in_record(self):
#Test that padding bytes don't matter when reading the type
def check(record, data):
self.assertEquals(self.reader.parse_record_from_bindata(data)[0], record)
r = HcrRecord(HcrRecord.VALTYPE_INT8, -123, 12, 43, 5)
d = hex_to_bindata("0C000000 2B000000 04000000 0500 0000 85FFFFFF")
check(r, d)
d = hex_to_bindata("0C000000 2B000000 04000000 0500 0000 85000000")
check(r, d)
r = HcrRecord(HcrRecord.VALTYPE_INT16, -12345, 12, 43, 5)
d = hex_to_bindata("0C000000 2B000000 02000000 0500 0000 C7CFFFFF")
check(r, d)
d = hex_to_bindata("0C000000 2B000000 02000000 0500 0000 C7CF0000")
check(r, d)
| [
"none@none"
] | none@none |
60392a30744a47c26902a16ef07ffd786dbd9d0d | f0e6b08f33ea27ca0382f0bf8d9b5e33c01d1dfc | /djangoproject/myproject/myproject/settings.py | 0fcb5842788261b489b6bce73e7e2cb25479c510 | [] | no_license | nupur3101/batch430 | 8edef26909c2223031d8ef8690ba5cc6a0c83335 | cb9f0644a9f6662e731eb8b9b3f0762a738864f4 | refs/heads/master | 2020-12-28T04:45:52.825960 | 2020-01-30T12:35:33 | 2020-01-30T12:35:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,832 | py | """
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ti=wk#krw4t4-2lt$i5weld7yf!%zxqh^nw4^p9a9u0r##6@*f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'users',
'blog',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_SSL = True
EMAIL_PORT = 465
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = "simrangrover5@gmail.com"
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
#create database signup character set 'utf8' -->to create database at mysql
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'NAME' : 'signup', #database name
'HOST' : 'localhost',
'PORT' : 3306,
'USER' : 'root',
'PASSWORD' : ""
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = ""
STATICFILES_DIRS = (os.path.join(BASE_DIR,'static'),)
| [
"simrangrover5@gmail.com"
] | simrangrover5@gmail.com |
2cfd1c024c96fe49b55a5504ae8a2442e1d5c830 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /TkL6GTu9QMhYnv869_14.py | a02ec9a366f6a9388b2c80a13da2f67a67dc42a7 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | """
Create a function that adds a string ending to each member in a list.
### Examples
add_ending(["clever", "meek", "hurried", "nice"], "ly")
➞ ["cleverly", "meekly", "hurriedly", "nicely"]
add_ending(["new", "pander", "scoop"], "er")
➞ ["newer", "panderer", "scooper"]
add_ending(["bend", "sharpen", "mean"], "ing")
➞ ["bending", "sharpening", "meaning"]
### Notes
* Don't forget to `return` the result.
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def add_ending(lst, ending):
a = []
for i in range(len(lst)):
a1 = []
a1.append(lst[i])
a1.append(ending)
a.append(''.join(a1))
return a
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9babd500af3d3514b3868f6b6c35a7c0b134ea0b | 77ab53380f74c33bb3aacee8effc0e186b63c3d6 | /5389_food_orders.py | 608d2efac050ccce436916c2505a26f57eaf268a | [] | no_license | tabletenniser/leetcode | 8e3aa1b4df1b79364eb5ca3a97db57e0371250b6 | d3ebbfe2e4ab87d5b44bc534984dfa453e34efbd | refs/heads/master | 2023-02-23T18:14:31.577455 | 2023-02-06T07:09:54 | 2023-02-06T07:09:54 | 94,496,986 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | '''
Given the array orders, which represents the orders that customers have done in a restaurant. More specifically orders[i]=[customerNamei,tableNumberi,foodItemi] where customerNamei is the name of the customer, tableNumberi is the table customer sit at, and foodItemi is the item customer orders.
Return the restaurant's “display table”. The “display table” is a table whose row entries denote how many of each food item each table ordered. The first column is the table number and the remaining columns correspond to each food item in alphabetical order. The first row should be a header whose first column is “Table”, followed by the names of the food items. Note that the customer names are not part of the table. Additionally, the rows should be sorted in numerically increasing order.
Example 1:
Input: orders = [["David","3","Ceviche"],["Corina","10","Beef Burrito"],["David","3","Fried Chicken"],["Carla","5","Water"],["Carla","5","Ceviche"],["Rous","3","Ceviche"]]
Output: [["Table","Beef Burrito","Ceviche","Fried Chicken","Water"],["3","0","2","1","0"],["5","0","1","0","1"],["10","1","0","0","0"]]
Explanation:
The displaying table looks like:
Table,Beef Burrito,Ceviche,Fried Chicken,Water
3 ,0 ,2 ,1 ,0
5 ,0 ,1 ,0 ,1
10 ,1 ,0 ,0 ,0
For the table 3: David orders "Ceviche" and "Fried Chicken", and Rous orders "Ceviche".
For the table 5: Carla orders "Water" and "Ceviche".
For the table 10: Corina orders "Beef Burrito".
Example 2:
Input: orders = [["James","12","Fried Chicken"],["Ratesh","12","Fried Chicken"],["Amadeus","12","Fried Chicken"],["Adam","1","Canadian Waffles"],["Brianna","1","Canadian Waffles"]]
Output: [["Table","Canadian Waffles","Fried Chicken"],["1","2","0"],["12","0","3"]]
Explanation:
For the table 1: Adam and Brianna order "Canadian Waffles".
For the table 12: James, Ratesh and Amadeus order "Fried Chicken".
Example 3:
Input: orders = [["Laura","2","Bean Burrito"],["Jhon","2","Beef Burrito"],["Melissa","2","Soda"]]
Output: [["Table","Bean Burrito","Beef Burrito","Soda"],["2","1","1","1"]]
Constraints:
1 <= orders.length <= 5 * 10^4
orders[i].length == 3
1 <= customerNamei.length, foodItemi.length <= 20
customerNamei and foodItemi consist of lowercase and uppercase English letters and the space character.
tableNumberi is a valid integer between 1 and 500.
'''
from collections import defaultdict
class Solution:
def displayTable(self, orders):
table_orders = defaultdict(lambda: defaultdict(int))
food = set()
for c,t,f in orders:
food.add(f)
table_orders[t][f] += 1
# print(table_orders)
# print(food)
res = [[] for _ in range(len(table_orders))]
header = []
for f in food:
header.append(f)
header.sort()
header.insert(0, 'Table')
for i,key in enumerate(table_orders):
res[i].append(str(key))
table = table_orders[key]
for f in header[1:]:
res[i].append(str(table[f]))
res.sort(key=lambda x: int(x[0]))
res.insert(0, header)
return res
s = Solution()
# orders = [["James","12","Fried Chicken"],["Ratesh","12","Fried Chicken"],["Amadeus","12","Fried Chicken"],["Adam","1","Canadian Waffles"],["Brianna","1","Canadian Waffles"]]
orders = [["David","3","Ceviche"],["Corina","10","Beef Burrito"],["David","3","Fried Chicken"],["Carla","5","Water"],["Carla","5","Ceviche"],["Rous","3","Ceviche"]]
res = s.displayTable(orders)
print(res)
| [
"tabletenniser@gmail.com"
] | tabletenniser@gmail.com |
7674273a6bb9f50a92b66b039b3705b528f49169 | 549a573c35dd79f77ded35a0c9cc0b6074daba64 | /src/pipelines/epidemiology/ch_openzh.py | 84b9bfc4c3b7e25a898a84d4e355b0320286a7ec | [
"Apache-2.0",
"CC-BY-4.0",
"CC-BY-SA-3.0",
"LicenseRef-scancode-proprietary-license",
"GPL-3.0-only",
"AGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"Unlicense"
] | permissive | harrisonzhu508/data | f91d5fb2847bfcba1c7debaad490266a11423424 | a3b95ced4abad6653d20f67f3f285abeeb0c2b25 | refs/heads/master | 2022-11-30T13:33:20.176773 | 2020-05-26T10:24:47 | 2020-05-26T10:24:47 | 266,201,099 | 0 | 0 | Apache-2.0 | 2020-08-03T20:55:05 | 2020-05-22T20:27:29 | HTML | UTF-8 | Python | false | false | 1,318 | py | from typing import Any, Dict, List
from pandas import DataFrame, concat, merge
from lib.pipeline import DefaultPipeline
from lib.time import datetime_isoformat
from lib.utils import grouped_diff
class OpenZHPipeline(DefaultPipeline):
data_urls: List[str] = [
"https://raw.github.com/openZH/covid_19/master/COVID19_Fallzahlen_CH_total.csv"
]
def parse_dataframes(
self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = (
dataframes[0]
.rename(
columns={
"ncumul_tested": "tested",
"ncumul_conf": "confirmed",
"ncumul_deceased": "deceased",
"ncumul_hosp": "hospitalized",
"ncumul_ICU": "intensive_care",
"ncumul_vent": "ventilator",
"ncumul_released": "recovered",
"abbreviation_canton_and_fl": "subregion1_code",
}
)
.drop(columns=["time", "source"])
)
# TODO: Match FL subdivision (not a canton?)
data = data[data.subregion1_code != "FL"]
data = grouped_diff(data, ["subregion1_code", "date"])
data["country_code"] = "CH"
return data
| [
"oscar@wahltinez.org"
] | oscar@wahltinez.org |
0dea551c1c168da413200bb795660b6ad3d2ebed | ee76919635ce69e14ddf64ee9483dca073625aaf | /pythonAlgorithm/Practice/2049统计最高分的节点数目.py | f02db61c51e3be23fda138522eb59a428d8074f0 | [] | no_license | bossjoker1/algorithm | 574e13f0dd8fe6b3e810efc03649493e90504288 | c745168a01380edb52155ca3918787d2dd356e5b | refs/heads/master | 2022-07-13T16:26:10.324544 | 2022-07-10T03:28:15 | 2022-07-10T03:28:15 | 407,361,838 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | class Solution:
def countHighestScoreNodes(self, parents: List[int]) -> int:
n = len(parents)
g = defaultdict(list)
for i in range(1, n):
g[parents[i]].append(i)
nums = [1] * n
def dfs(root:int) -> int:
if root not in g:
return 1
for item in g[root]:
nums[root] += dfs(item)
return nums[root]
dfs(0)
maxn, cnt = -1, 0
for i in range(n):
res = 1
if parents[i] == -1:
res *= 1
else:
res *= nums[0] - nums[i]
for item in g[i]:
res *= nums[item]
if res == maxn:
cnt += 1
elif res > maxn:
maxn = res
cnt = 1
return cnt
| [
"1397157763@qq.com"
] | 1397157763@qq.com |
af0935f71aa7ec410a3efca1dfe1563cbdb5e895 | acd1bec2b5f574aac9c91fa88cb2ad812c820066 | /Memorization Tool/task/table.py | dee73d297d70a4ac949778217a5f28dbdf5d3639 | [] | no_license | TogrulAga/Memorization-Tool | ec89006351947ea20fe6562024de750a2e1e0af9 | e1773ff8062ac4cff1018e06bf852f6315b414ab | refs/heads/master | 2023-07-05T21:14:18.522215 | 2021-08-08T10:17:47 | 2021-08-08T10:17:47 | 393,927,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
Base = declarative_base()
class FlashCard(Base):
__tablename__ = "flashcard"
id = Column(Integer, primary_key=True)
question = Column(String)
answer = Column(String)
box_number = Column(Integer)
| [
"toghrul.aghakishiyev@ericsson.com"
] | toghrul.aghakishiyev@ericsson.com |
5b189b2f2f1e90e3245a51923b9761e2b2d19619 | 5ca2cca7762dcfcf9dd4b6a646fb8d39b9c2718f | /manager/migrations/0004_auto_20201213_2242.py | 4293fc64255a2037a96563320da320c050a0649d | [] | no_license | trilong0610/WebThoiTrang-Old | 372b99036f160b9eb9c503f7b1987177855f5d6a | ae04a43019916e87099edb614d0d155f139f0d09 | refs/heads/main | 2023-02-02T23:22:39.938038 | 2020-12-16T08:30:55 | 2020-12-16T08:30:55 | 321,085,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | # Generated by Django 3.1.3 on 2020-12-13 15:42
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('manager', '0003_auto_20201213_2240'),
]
operations = [
migrations.AlterField(
model_name='purchaseproduct',
name='time_create',
field=models.DateTimeField(default=datetime.datetime(2020, 12, 13, 22, 42, 23, 127128)),
),
]
| [
"trilong0610@gmail.com"
] | trilong0610@gmail.com |
b85551098ac4c2584a9d741b04153b773e5537fd | 9b0b0fbc5b5a9865108552eb8fe58a04f9cc0cc3 | /fabtools/icanhaz/python.py | 404644ba8740994e207506d9b4030fdc2e83c472 | [
"BSD-2-Clause"
] | permissive | thoas/fabtools | 8f77a24ed21f318e3bf7c8817a9b77a6def8ceb8 | 8cd63767384e589629e5f02e67bd498a8473b99c | refs/heads/master | 2021-01-18T06:36:29.737133 | 2011-09-05T15:55:36 | 2011-09-05T15:55:36 | 2,327,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | """
Idempotent API for managing python packages
"""
from fabtools.python import *
def package(pkg_name, virtualenv=None, use_sudo=False):
"""
I can haz python package
"""
if not is_installed(pkg_name):
install(pkg_name, virtualenv, use_sudo)
def packages(pkg_list, virtualenv=None, use_sudo=False):
"""
I can haz python packages
"""
pkg_list = [pkg for pkg in pkg_list if not is_installed(pkg)]
if pkg_list:
install(pkg_list, virtualenv, use_sudo)
| [
"ronan.amicel@gmail.com"
] | ronan.amicel@gmail.com |
1b40a16b02b23dda2bf80039eea52b5c4399e387 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/FSAF_for_Pytorch/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py | 34c195d94bfefb4f5edace4eeca67ed63fff2f2c | [
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 985 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py'
model = dict(
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'))
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
864cd0f813eea94b8cd02c8f890eb24dfdfa3cc8 | c6e744658cf9633f6571f349fff444d84634a754 | /umweltbank/spiders/umweltbank.py | f1324715bba82c34f32859f99a3a6acc7d91df01 | [] | no_license | daniel-kanchev/umweltbank | a76bd0f2d86e7cfac691a0dcc8064b4d0cfc358f | 17dbe445c85583d298249a084ec828b4ef607338 | refs/heads/main | 2023-03-19T18:12:03.420926 | 2021-03-15T12:54:35 | 2021-03-15T12:54:35 | 347,968,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst
from datetime import datetime
from umweltbank.items import Article
class UmweltbankSpider(scrapy.Spider):
name = 'umweltbank'
start_urls = ['https://bankundumwelt.de/']
def parse(self, response):
links = response.xpath('//a[@class="post-link link"]/@href').getall()
yield from response.follow_all(links, self.parse_article)
def parse_article(self, response):
if 'pdf' in response.url:
return
item = ItemLoader(Article())
item.default_output_processor = TakeFirst()
title = response.xpath('//h1/text()').get()
if title:
title = title.strip()
date = response.xpath('//div[@class="post-meta-date meta-info"]/text()').get()
if date:
date = date.strip()
content = response.xpath('//div[@class="post-content"]//text()').getall()
content = [text for text in content if text.strip()]
content = "\n".join(content).strip()
item.add_value('title', title)
item.add_value('date', date)
item.add_value('link', response.url)
item.add_value('content', content)
return item.load_item()
| [
"daniel.kanchev@adata.pro"
] | daniel.kanchev@adata.pro |
139bfda90e79fcdbf1bdb18feb466787fc6c150e | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/1-Python-Basics/20-list-method_20200413040951.py | f5bdbdbd3425864c68451d96e168feb776b6cffd | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | basket = [21, 12,33, 35, 99]
print(basket)
print(len(basket))
#33 gets pops because it is 2nd number in the array
print(basket.pop(2))
print(basket)
#extend
basket1 = [1000, 2000, 3000]
print(basket.extend(basket1))
print(basket)
#append - last to the list
print(basket.append(700))
print(basket)
#index
print(basket.index(21))
print(basket)
print(basket.sort())
#insert
print(basket.insert(5, 1020))
| [
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
e70be8b0d16cb7fe3b8690933269906f2d221d46 | 345529a5ae9ac4831e1a04066612c929a2a8ad7e | /ayush_crowdbotics_378/wsgi.py | 51606165e32cea776f0b3de315377a6dfb613e20 | [] | no_license | payush/ayush-crowdbotics-378 | 29f7552e5d91c21f530af982bc16477fb11df28a | aaec9093eec6408df059172033ed8b4bf3be3c97 | refs/heads/master | 2020-03-23T22:35:05.533833 | 2018-07-24T16:17:07 | 2018-07-24T16:17:07 | 142,183,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | """
WSGI config for ayush_crowdbotics_378 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ayush_crowdbotics_378.settings")
application = get_wsgi_application()
| [
"ayushpuroheet@gmail.com"
] | ayushpuroheet@gmail.com |
c24714f252c9778e9b5426070d57d02130f12ed8 | 97124dfacdb78a9301aebf5a3a4ecad7a5d0116b | /bp/__init__.py | f953eecb7fa5f1feab26592a1ee4475b728301cb | [] | permissive | esayyari/improved-octo-waddle | 982a5bd156f76415bf5dba875309190acea77b9a | 7aca988734cc1107b2d991eb9379347a08a32792 | refs/heads/master | 2022-11-20T20:16:43.370024 | 2020-07-21T19:13:35 | 2020-07-21T19:13:35 | 281,470,015 | 0 | 0 | BSD-3-Clause | 2020-07-21T18:39:11 | 2020-07-21T18:07:52 | Jupyter Notebook | UTF-8 | Python | false | false | 580 | py | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, BP development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from ._bp import BP
from ._io import parse_newick
from ._conv import to_skbio_treenode, from_skbio_treenode, to_skbio_treearray
__all__ = ['BP', 'parse_newick', 'to_skbio_treenode', 'from_skbio_treenode',
'to_skbio_treearray']
| [
"mcdonadt@colorado.edu"
] | mcdonadt@colorado.edu |
4be7a5039044527d2980a0bf1b7338f67739259d | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/5702d57eb4cb6ef6e7e8/snippet.py | 5ba8fa69b8f139f132805c7e897a493b1561e969 | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 1,624 | py | #!/usr/bin/env python2.7
import argparse
import os
import qrcode
import qrcode.image.pil
import sqlite3
import sys
import urllib
class AuthenticatorAccount(object):
def __init__(self, account_name, account_desc, secret):
self.account_name = account_name
self.account_desc = account_desc
self.secret = secret
def __repr__(self):
return "AuthenticatorAccount@%s%s" % (hex(id(self))[2:], self.__dict__)
def __main__():
    """Export every account in a Google Authenticator SQLite DB as a QR PNG.

    Reads the ``accounts`` table of the given database and writes one
    ``qrcode-account-NN.png`` per row, encoding the standard
    ``otpauth://totp/...`` provisioning URI.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("database", help="The SQLite database file.")
    args = parser.parse_args()

    if not os.path.isfile(args.database):
        sys.stderr.write("Unable to open %s.\n" % (args.database,))
        sys.stderr.flush()
        sys.exit(1)

    conn = sqlite3.connect(args.database)
    # Row factory gives name-based access (row['issuer'] etc.).
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM accounts ORDER BY _id;")

    # sqlite3 cursors are iterable; no need for a fetchone()/break loop.
    for row in cursor:
        account = AuthenticatorAccount(row['issuer'] or row['original_name'],
                                       row['email'], row['secret'])
        # BUG FIX: the status message used to say ".svg" although the file
        # is written as PNG below.
        print('Saving "%s" to "qrcode-account-%02d.png"' % (account.account_desc,
            row['_id']))
        # NOTE(review): the URI components are not percent-encoded; an issuer
        # containing '&' or spaces would yield a malformed otpauth URI (the
        # file imports urllib, presumably intended for this) — confirm.
        qr = qrcode.make("otpauth://totp/%s?secret=%s&issuer=%s" % (
            account.account_desc, account.secret, account.account_name),
            image_factory=qrcode.image.pil.PilImage)
        with open("qrcode-account-%02d.png" % (row['_id'],), "wb") as f:
            qr.save(f)


if __name__ == "__main__":
    __main__()
| [
"42325807+dockerizeme@users.noreply.github.com"
] | 42325807+dockerizeme@users.noreply.github.com |
e21b5f5350a57068a2fab6ad42da5cb8ff4b1390 | 928dcef8e8d682f3e4062b0cb3d49e6151383138 | /setup.py | acf65858f3b5704078624f20c38d4740c8f5a803 | [
"MIT"
] | permissive | giserh/hug | 3aa5724aadf5c06dd8c7b5f867ea40ea057e2e78 | a15bb9497d23398dc82c496352d91e32ff183f13 | refs/heads/master | 2021-01-18T08:41:32.402648 | 2015-09-15T02:12:21 | 2015-09-15T02:12:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py | #!/usr/bin/env python
import subprocess
import sys
# Prefer setuptools; fall back to plain distutils when it is unavailable.
try:
from setuptools import setup
from setuptools.command.test import test as TestCommand
# setuptools flavour: `python setup.py test` runs the suite through pytest.
class PyTest(TestCommand):
extra_kwargs = {'tests_require': ['pytest', 'mock']}
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
sys.exit(pytest.main(self.test_args))
except ImportError:
from distutils.core import setup, Command
# distutils fallback: delegate to the project's runtests.py script instead.
class PyTest(Command):
extra_kwargs = {}
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
raise SystemExit(subprocess.call([sys.executable, 'runtests.py']))
# Convert the Markdown README to reST for PyPI; fall back to an empty
# long_description when pypandoc (or the pandoc binary) is missing.
try:
import pypandoc
readme = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
readme = ''
setup(name='hug',
version='1.4.0',
description='A Python framework that makes developing APIs as simple as possible, but no simpler.',
long_description=readme,
author='Timothy Crosley',
author_email='timothy.crosley@gmail.com',
url='https://github.com/timothycrosley/hug',
license="MIT",
entry_points={
'console_scripts': [
'hug = hug:run.terminal',
]
},
packages=['hug'],
requires=['falcon'],
install_requires=['falcon'],
cmdclass={'test': PyTest},
keywords='Web, Python, Python3, Refactoring, REST, Framework, RPC',
classifiers=['Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'Natural Language :: English',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities'],
**PyTest.extra_kwargs)
| [
"timothy.crosley@gmail.com"
] | timothy.crosley@gmail.com |
d013dd9eb4c44f3e18ece5550ce2a994787e3aaa | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_deployment/online/resource_requirements_schema.py | 7f43d91fc60142f1307ebcac5a55c78d2d8f447f | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 899 | py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=unused-argument
import logging
from typing import Any
from marshmallow import post_load
from azure.ai.ml._schema.core.fields import NestedField
from azure.ai.ml._schema.core.schema import PatchedSchemaMeta
from .resource_settings_schema import ResourceSettingsSchema
module_logger = logging.getLogger(__name__)
# Marshmallow schema for a deployment's resource requirements: optional
# nested `requests` and `limits` resource-settings blocks.
class ResourceRequirementsSchema(metaclass=PatchedSchemaMeta):
requests = NestedField(ResourceSettingsSchema)
limits = NestedField(ResourceSettingsSchema)
# After a successful load, turn the validated dict into the SDK entity.
@post_load
def make(self, data: Any, **kwargs: Any) -> "ResourceRequirementsSettings":
# Local import — presumably to avoid a circular import between the
# schema and entities packages; confirm before hoisting to module level.
from azure.ai.ml.entities import ResourceRequirementsSettings
return ResourceRequirementsSettings(**data)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
24e20f6e355cde10540c8b7eaacd974cbceb2334 | 5330918e825f8d373d3907962ba28215182389c3 | /CMGTools/Common/python/factories/cmgDiPFCandidate_cfi.py | e5a22266d42400df25dda74197c0de4f68b1198c | [] | no_license | perrozzi/cmg-cmssw | 31103a7179222c7aa94f65e83d090a5cf2748e27 | 1f4cfd936da3a6ca78f25959a41620925c4907ca | refs/heads/CMG_PAT_V5_18_from-CMSSW_5_3_22 | 2021-01-16T23:15:58.556441 | 2017-05-11T22:43:15 | 2017-05-11T22:43:15 | 13,272,641 | 1 | 0 | null | 2017-05-11T22:43:16 | 2013-10-02T14:05:21 | C++ | UTF-8 | Python | false | false | 415 | py | import FWCore.ParameterSet.Config as cms
from CMGTools.Common.factories.cmgDiObject_cfi import diObjectFactory
cmgDiPFCandidateFactory = diObjectFactory.clone(
leg1Collection = cms.InputTag("particleFlow"),
leg2Collection = cms.InputTag("particleFlow"),
)
cmgDiPFCandidate = cms.EDFilter(
"DiPFCandidatePOProducer",
cfg = cmgDiPFCandidateFactory.clone(),
cuts = cms.PSet(
),
)
| [
"colin.bernet@cern.ch"
] | colin.bernet@cern.ch |
cffd3f09c260d4a20c639febd7d8c9d868a3c8bb | e71fa62123b2b8f7c1a22acb1babeb6631a4549b | /xlsxwriter/test/comparison/test_chart_layout03.py | 8edd7c7b233330d0dc5e1166cecf00a1a5e839f5 | [
"BSD-2-Clause"
] | permissive | timgates42/XlsxWriter | 40480b6b834f28c4a7b6fc490657e558b0a466e5 | 7ad2541c5f12b70be471b447ab709c451618ab59 | refs/heads/main | 2023-03-16T14:31:08.915121 | 2022-07-13T23:43:45 | 2022-07-13T23:43:45 | 242,121,381 | 0 | 0 | NOASSERTION | 2020-02-21T11:14:55 | 2020-02-21T11:14:55 | null | UTF-8 | Python | false | false | 1,650 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename('chart_layout03.xlsx')

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with user defined layout."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({'type': 'column'})
        # Pin the axis ids so the output matches the reference workbook.
        chart.axis_ids = [68312064, 69198592]

        columns = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        # One worksheet column per data list (A1, B1, C1)...
        for letter, values in zip('ABC', columns):
            worksheet.write_column(letter + '1', values)

        # ...and one chart series per written column.
        for letter in 'ABC':
            chart.add_series({'values': '=Sheet1!$%s$1:$%s$5' % (letter, letter)})

        # Legend with an explicit, user-defined layout (the feature under test).
        chart.set_legend({
            'position': 'overlay_right',
            'layout': {
                'x': 0.80197353455818043,
                'y': 0.3744240303295423,
                'width': 0.12858202099737534,
                'height': 0.25115157480314959,
            }
        })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
c9f2b0fb720c3590de12e05c6c7e8e3c9323feb6 | a60e81b51935fb53c0900fecdadba55d86110afe | /LeetCode/python/76___________hard_Minimum Window Substring.py | 168c878e0eb107b71a2577dc7bb95e39d86f3841 | [] | no_license | FrankieZhen/Lookoop | fab6855f5660467f70dc5024d9aa38213ecf48a7 | 212f8b83d6ac22db1a777f980075d9e12ce521d2 | refs/heads/master | 2020-07-27T08:12:45.887814 | 2019-09-16T11:48:20 | 2019-09-16T11:48:20 | 209,021,915 | 1 | 0 | null | 2019-09-17T10:10:46 | 2019-09-17T10:10:46 | null | UTF-8 | Python | false | false | 1,422 | py | """
Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n).
Example:
Input: S = "ADOBECODEBANC", T = "ABC"
Output: "BANC"
Note:
If there is no such window in S that covers all characters in T, return the empty string "".
If there is such window, you are guaranteed that there will always be only one unique minimum window in S.
"""
import collections
# 2018-6-26
# Minimum Window Substring
# https://leetcode.com/problems/minimum-window-substring/discuss/26804/12-lines-Python
class Solution:
    def minWindow(self, s, t):
        """
        Return the minimum window of ``s`` that contains every character of
        ``t`` (with multiplicity), or ``""`` when no such window exists.

        Sliding window in O(len(s) + len(t)): extend the right edge until the
        window covers all required characters, then shrink the left edge while
        coverage holds, remembering the best (shortest) window seen.

        BUG FIX: the previous version left per-character debug ``print``
        calls in the hot loop, flooding stdout and slowing the solution.

        :type s: str
        :type t: str
        :rtype: str
        """
        need, missing = collections.Counter(t), len(t)
        left = best_left = best_right = 0
        for right, ch in enumerate(s, 1):
            # A positive remaining count means ch was still required.
            if need[ch] > 0:
                missing -= 1
            need[ch] -= 1
            if not missing:
                # Shrink: drop characters the window holds in surplus
                # (negative counts) from the left edge.
                while left < right and need[s[left]] < 0:
                    need[s[left]] += 1
                    left += 1
                # Record the first window, then any window at least as short.
                if not best_right or right - left <= best_right - best_left:
                    best_left, best_right = left, right
        return s[best_left:best_right]
# test
# Smoke test: the minimum window of "ADOBECODEBANC" covering "ABC" is "BANC".
S = "ADOBECODEBANC"
T = "ABC"
test = Solution()
res = test.minWindow(S,T)
print(res)
"33798487+YangXiaoo@users.noreply.github.com"
] | 33798487+YangXiaoo@users.noreply.github.com |
bb80ed73e3d7d90ea18dfe174b196ba572578e48 | bf2d87cc14f983a6c563ebe1bd49c48a7474ddff | /2018年力扣高频算法面试题汇总/完全平方数 my_dp.py | ec40fb66f459e16640f69cbea7ef8735b0aac1de | [] | no_license | iamkissg/leetcode | 6bd1c87c67ffc6f5d231cac3224c928e22f62af3 | 99a3abf1774933af73a8405f9b59e5e64906bca4 | refs/heads/master | 2020-05-05T11:21:05.900296 | 2019-10-27T10:51:51 | 2019-10-27T10:51:51 | 179,986,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | from math import sqrt, floor
class Solution:
    # Class-level cache shared by every instance and every call:
    # memo[k] == minimum number of perfect squares summing to k.
    # Keeping it on the class (instead of per-instance in __init__) lets
    # repeated calls reuse all previously computed answers.
    memo = {0: 0, 1: 1}

    def numSquares(self, n: int) -> int:
        """Return the least number of perfect squares that sum to n."""
        cache = self.memo
        if n in cache:
            return cache[n]
        # Bottom-up: fill every missing entry from 2 up to n.  Each value is
        # one square root*root plus the best answer for the remainder.
        for value in range(2, n + 1):
            if value in cache:
                continue
            limit = int(value ** 0.5)
            cache[value] = 1 + min(cache[value - root * root]
                                   for root in range(1, limit + 1))
        return cache[n]
if __name__ == "__main__":
# Ad-hoc smoke tests; the shared class-level memo makes the later single
# calls (2, 8, ..., 5674) near-instant after the warm-up loop.
sol = Solution()
print(sol.memo)
for i in [12,2,3,4,5,6,7,8,9,10,5373,5374]:
print(sol.numSquares(i))
print('='*80)
print(sol.numSquares(2))
print(sol.numSquares(8))
print(sol.numSquares(10))
print(sol.numSquares(11))
print(sol.numSquares(12))
print(sol.numSquares(13))
print(sol.numSquares(5673))
print(sol.numSquares(5674))
| [
"enginechen07@gmail.com"
] | enginechen07@gmail.com |
8d49ac34a3e825f622adeb42a001aa171bd8c13f | b9fd9ed02312be96e05ef23243c4dfac1392be08 | /tensorflow/contrib/py2tf/utils/multiple_dispatch_test.py | 8d89b6898a366fe90ee1d43a55d0a7f10690224b | [
"Apache-2.0"
] | permissive | RLeili/tensorflow | 9e5650b5d02771da94a345ceb97b4f3293638e1e | 42ee949d022d8665cf2e908e800f1ef1594c6abf | refs/heads/master | 2021-04-09T11:51:32.393739 | 2019-10-16T16:44:23 | 2019-10-16T16:44:23 | 125,318,700 | 0 | 0 | Apache-2.0 | 2018-03-15T05:50:05 | 2018-03-15T05:50:04 | null | UTF-8 | Python | false | false | 3,748 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multiple_dispatch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.py2tf.utils import multiple_dispatch
from tensorflow.python.client.session import Session
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.platform import test
class MultipleDispatchTest(test.TestCase):
  """Exercises the multiple_dispatch helpers on Python values and TF tensors."""

  def test_dynamic_is_python(self):
    base = np.eye(3)
    alias = base
    lookalike = np.eye(3)

    # dynamic_is/dynamic_is_not test object *identity*: an alias matches,
    # an equal-but-distinct array does not.
    self.assertTrue(multiple_dispatch.dynamic_is(base, alias))
    self.assertTrue(multiple_dispatch.dynamic_is_not(base, lookalike))
    self.assertFalse(multiple_dispatch.dynamic_is_not(base, alias))
    self.assertFalse(multiple_dispatch.dynamic_is(base, lookalike))

  def test_dynamic_is_tf(self):
    with Session().as_default():
      base = constant([2.0])
      alias = base
      lookalike = constant([2.0])

      # With tensors the result is itself a tensor, so evaluate it.
      self.assertTrue(multiple_dispatch.dynamic_is(base, alias).eval())
      self.assertTrue(multiple_dispatch.dynamic_is_not(base, lookalike).eval())
      self.assertFalse(multiple_dispatch.dynamic_is_not(base, alias).eval())
      self.assertFalse(multiple_dispatch.dynamic_is(base, lookalike).eval())

  def test_run_cond_python(self):
    on_true = lambda: 2.0
    on_false = lambda: 3.0

    self.assertEqual(multiple_dispatch.run_cond(True, on_true, on_false), 2.0)
    self.assertEqual(multiple_dispatch.run_cond(False, on_true, on_false), 3.0)

  def test_run_cond_tf(self):
    on_true = lambda: constant([2.0])
    on_false = lambda: constant([3.0])

    with Session() as sess:
      chosen = multiple_dispatch.run_cond(constant(True), on_true, on_false)
      self.assertEqual(sess.run(chosen), 2.0)

      chosen = multiple_dispatch.run_cond(constant(False), on_true, on_false)
      self.assertEqual(sess.run(chosen), 3.0)

  def test_run_while_python(self):
    keep_going = lambda x, limit, factor: x > limit
    scale_down = lambda x, limit, factor: (x * factor, limit, factor)

    # 3.0 -> 1.5 -> 0.75: loops until the value is no longer above the limit.
    final, _, _ = multiple_dispatch.run_while(keep_going, scale_down,
                                              [3.0, 1.0, 0.5])
    self.assertEqual(final, 0.75)

    # Condition false immediately: the initial value comes back unchanged.
    final, _, _ = multiple_dispatch.run_while(keep_going, scale_down,
                                              [3.0, 4.0, 0.5])
    self.assertEqual(final, 3.0)

  def test_run_while_tf(self):
    keep_going = lambda x, limit, factor: x > limit
    scale_down = lambda x, limit, factor: (x * factor, limit, factor)

    with Session() as sess:
      final, _, _ = multiple_dispatch.run_while(keep_going, scale_down,
                                                [constant(3.0), 1.0, 0.5])
      self.assertEqual(sess.run(final), 0.75)

      final, _, _ = multiple_dispatch.run_while(keep_going, scale_down,
                                                [constant(3.0), 4.0, 0.5])
      self.assertEqual(sess.run(final), 3.0)
if __name__ == '__main__':
test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
8bff32a3731de4f870e8a5a3d26bb063abc9e9ad | 453956e3a66b417a0c6f999951c44e474a81af7e | /19.05.2021/exc0/tests.py | 968d0baef2c3e2d47c85b11f324d27e2ce8fe288 | [] | no_license | conradylx/Python_Course | aa7db9671a30034fe8cf65d22304e76ef2b4c4ab | b8f813c077a61bd3321638f90633529fbda756f0 | refs/heads/main | 2023-06-02T07:24:57.073365 | 2021-06-13T17:41:59 | 2021-06-13T17:41:59 | 337,829,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | import unittest
from exc0 import triangle, trapezoid
class FieldsTestCase(unittest.TestCase):
    """Unit tests for the triangle() and trapezoid() area helpers."""

    def setUp(self):
        # Common fixture: bases a/b and height h.
        self.a = 2
        self.b = 3
        self.h = 5

    def test_triangle_with_correct_result(self):
        # Area = a * h / 2 = 2 * 5 / 2 = 5.
        result = triangle(self.a, self.h)
        self.assertEqual(result, 5)

    def test_triangle_with_incorrect_values(self):
        # A non-numeric base must raise, not silently produce garbage.
        with self.assertRaises(TypeError):
            triangle("*", self.h)

    def test_trapezoid_with_correct_result(self):
        # Area = (a + b) / 2 * h = 2.5 * 5 = 12.5.
        result = trapezoid(self.a, self.b, self.h)
        self.assertEqual(result, 12.5)

    def test_trapezoid_with_incorrect_value(self):
        with self.assertRaises(TypeError):
            trapezoid('**', self.b, self.h)

    def tearDown(self):
        # BUG FIX: previously only `a` and `h` were deleted, leaving `b`
        # behind; clean up the full fixture symmetrically with setUp().
        del self.a, self.b, self.h
if __name__ == '__main__':
unittest.main()
| [
"50596942+conradylx@users.noreply.github.com"
] | 50596942+conradylx@users.noreply.github.com |
43b5830be6db5503e0c7a5b34a5a8d7940745656 | 22a5d684341cee8f1095c3fe193f01f40f8121db | /2021/Qualification/E. Cheating Detection/2021-q-e.py | 7b93ef36bb2a94a0202dad9af0142a53f0e34eaf | [] | no_license | andy1li/codejam | 161b1db6faab372a4c2c4ce5956942387c650bed | 3aa6ab1673064b8c80b5f56422bd496b372b30f3 | refs/heads/master | 2022-06-28T02:42:53.980149 | 2022-06-27T20:15:11 | 2022-06-27T20:15:11 | 53,395,936 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # 2021 Qualification Round - E. Cheating Detection
# https://codingcompetitions.withgoogle.com/codejam/round/000000000043580a/00000000006d12d7
import numpy as np
#------------------------------------------------------------------------------#
def solve(data):
difficulty = np.mean(data, axis=0)
correct_spreads = [difficulty[row].std() for row in data]
return np.argmax(correct_spreads) + 1
#------------------------------------------------------------------------------#
# Driver: first input line holds the case count T (the second read value is
# discarded); each case is 100 lines of 0/1 answer strings, one per player.
T, _ = int(input()), input()
for i in range(T):
data = [ [bool(int(x)) for x in input()] for _ in range(100) ]
result = solve(data)
print('Case #{}:'.format(i+1), result)
"li.chenxing@gmail.com"
] | li.chenxing@gmail.com |
68d8523fe078e7be65b1c937304f635861598508 | 3740de0d6e43ea140fc09ab314e4c492603ba185 | /scripts/sources/S_EvaluationCornishFisherLogN.py | ab21f423595458d1481180db8baab78c58386dad | [
"MIT"
] | permissive | s0ap/arpmRes | 29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | refs/heads/master | 2022-02-16T05:01:22.118959 | 2019-08-20T16:45:02 | 2019-08-20T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_EvaluationCornishFisherLogN [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_EvaluationCornishFisherLogN&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=EBCornishFisherEvaluation).
# ## Prepare the environment
# +
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from numpy import array
import matplotlib.pyplot as plt
plt.style.use('seaborn')
from PortfolioMomentsLogN import PortfolioMomentsLogN
from CornishFisher import CornishFisher
# parameters
# Presumably: v_tnow = current instrument values, (mu, sigma2) = parameters
# of the jointly lognormal distribution, h = portfolio holdings — confirm
# against PortfolioMomentsLogN's signature.
v_tnow = array([[2], [1.5]])
mu = array([[0.5], [-0.3]])
sigma2 = array([[0.55, 0.82],
[0.82, 1.05]])
h = array([[2], [1]])
# c: confidence level for the quantile-based index computed below.
c = 0.95
# -
# ## Computation of the expectation, the standard deviation and the skewness
# ## of the portfolio's P&L using function PortfolioMomentsLogN
muPL_h, sdPL_h, skPL_h = PortfolioMomentsLogN(v_tnow, h, mu, sigma2)
# ## Using the skewness computed at the previous step, compute the third central
# ## moment of the portfolio's P&L
# NOTE(review): skewness times sigma^3 gives the third central moment; `@`
# is applied to small arrays here — confirm shapes before refactoring.
third_central = skPL_h@(sdPL_h) ** 3
# ## Computation of the Cornish-Fisher expansion of the quantile based-index
# ## with confidence c=0.95 using function CornishFisher
q = CornishFisher(muPL_h, sdPL_h, skPL_h, 1 - c)
| [
"dario.popadic@yahoo.com"
] | dario.popadic@yahoo.com |
d8e9124430e8d00df512614fb61d1275470e6dff | b8fe1fbe36bff3f05cceecbe9811699ba1fb6765 | /python_processThreading/asynico_oo/coroutines_asyncio.py | 4b9d73a2e3374a9d178965c6d8ce5adda1366e3a | [] | no_license | xiaotiankeyi/PythonBase | c2edfeac9fe8d116a68725a784bcb183b1308af9 | 8f1377eb03135e8ee9c047f6e7762a0d69601ca1 | refs/heads/master | 2023-03-16T19:43:45.589440 | 2023-03-13T06:05:53 | 2023-03-13T06:05:53 | 219,412,971 | 0 | 0 | null | 2022-02-27T18:57:30 | 2019-11-04T03:51:29 | JavaScript | UTF-8 | Python | false | false | 727 | py | # 概念:asyncio是3.4以后的协程模块,是python实现并发重要的包,使用事件循环驱动实现并发,实现异步io
import asyncio
async def aunt():
    """Print ten numbered messages, yielding to the loop for 1s after each."""
    count = 0
    while count < 10:
        print(f'{count}婶婶说,python是世界上最好的语言')
        # Suspend here so other coroutines can run instead of blocking.
        await asyncio.sleep(1)
        count += 1
async def uncle():
    """Print ten numbered messages, yielding to the loop for 0.5s after each."""
    count = 0
    while count < 10:
        print(f'{count}叔叔说,python是世界上最好的语言')
        # Shorter sleep than aunt(): this coroutine finishes its round first.
        await asyncio.sleep(0.5)
        count += 1
if __name__ == "__main__":
    async def _main():
        # Run both speakers concurrently; gather() returns once both finish.
        await asyncio.gather(aunt(), uncle())

    # asyncio.run() (Python 3.7+) creates the event loop, drives the coroutine
    # to completion, and closes the loop afterwards — replacing the deprecated
    # get_event_loop() / run_until_complete() / close() sequence.
    asyncio.run(_main())
"laizhitian163@163.com"
] | laizhitian163@163.com |
29858121c108ccddea63d18579cac7770cfd723a | 31b3ac7cc2f0cf43a4979e53d43002a9c5fb2038 | /find the duplicate number.py | 9c5c5e678c6f0e93eadb36086de26bd63e1a9662 | [] | no_license | shreyansh-tyagi/leetcode-problem | ed31ada9608a1526efce6178b4fe3ee18da98902 | f8679a7b639f874a52cf9081b84e7c7abff1d100 | refs/heads/master | 2023-08-26T13:50:27.769753 | 2021-10-29T17:39:41 | 2021-10-29T17:39:41 | 378,711,844 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | '''
Given an array of integers nums containing n + 1 integers where each integer is in the range [1, n] inclusive.
There is only one repeated number in nums, return this repeated number.
You must solve the problem without modifying the array nums and uses only constant extra space.
Example 1:
Input: nums = [1,3,4,2,2]
Output: 2
Example 2:
Input: nums = [3,1,3,4,2]
Output: 3
Example 3:
Input: nums = [1,1]
Output: 1
Example 4:
Input: nums = [1,1,2]
Output: 1
Constraints:
1 <= n <= 105
nums.length == n + 1
1 <= nums[i] <= n
All the integers in nums appear only once except for precisely one integer which appears two or more times.
Follow up:
How can we prove that at least one duplicate number must exist in nums?
Can you solve the problem in linear runtime complexity?
'''
class Solution:
    def findDuplicate(self, nums: List[int]) -> int:
        """Return the single repeated value in nums.

        BUG FIX: the previous version compared only *adjacent* elements,
        which is correct solely for sorted input; the problem guarantees
        neither sortedness nor adjacency (e.g. [3,1,3,4,2] -> 3) and also
        forbids modifying the array, so sorting first is not an option.

        Uses Floyd's tortoise-and-hare cycle detection: viewing the array as
        the function i -> nums[i] (values are in [1, n] for an array of
        length n+1), the duplicate is the entry point of the cycle.
        O(n) time, O(1) extra space, and nums is left untouched.
        """
        # Phase 1: advance at different speeds until the pointers meet
        # somewhere inside the cycle.
        slow = fast = nums[0]
        while True:
            slow = nums[slow]
            fast = nums[nums[fast]]
            if slow == fast:
                break
        # Phase 2: restart one pointer; they meet at the cycle entrance,
        # which is exactly the duplicated value.
        slow = nums[0]
        while slow != fast:
            slow = nums[slow]
            fast = nums[fast]
        return slow
"sunnytyagi886@gmail.com"
] | sunnytyagi886@gmail.com |
9f4cfac14ac62d7112c411ced01e0372d6b107e1 | 37594c48dfb4c80b3c07a9dfb5a2eac8aa4b69f3 | /guitool/__PYQT__/QtCore.py | cde2345e15eae523efd9e94f5b138c454072c907 | [
"Apache-2.0"
] | permissive | SU-ECE-18-7/guitool | 45e0246feedeebb82950f166305f23dd308f5937 | 4d7e09e3318de92456912e84436d6ce8e1cf8e47 | refs/heads/master | 2021-06-01T09:51:56.452050 | 2016-06-24T02:28:20 | 2016-06-24T02:28:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from __future__ import absolute_import, division, print_function
# flake8:noqa
# Wrapper around PyQt4/5
from PyQt4.QtCore import *
| [
"crallj@rpi.edu"
] | crallj@rpi.edu |
8a42862ec234fb7a7fa0b4d5e8ff400e32f4f800 | dd87194dee537c2291cf0c0de809e2b1bf81b5b2 | /k8sclient/models/v1alpha1_certificate_signing_request_spec.py | a896c60a5b40a38a2b092ea2a8d5c370cc10488a | [
"Apache-2.0"
] | permissive | Arvinhub/client-python | 3ea52640ab02e4bf5677d0fd54fdb4503ecb7768 | d67df30f635231d68dc4c20b9b7e234c616c1e6a | refs/heads/master | 2023-08-31T03:25:57.823810 | 2016-11-02T22:44:36 | 2016-11-02T22:44:36 | 73,865,578 | 1 | 0 | Apache-2.0 | 2018-10-10T12:16:45 | 2016-11-15T23:47:17 | Python | UTF-8 | Python | false | false | 5,867 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: unversioned
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1CertificateSigningRequestSpec(object):
    """Swagger model for a Kubernetes CertificateSigningRequest spec.

    Originally generated by swagger-codegen; hand-adjusted to drop the
    unnecessary ``six`` dependency (the ``map``/``lambda``/``iteritems``
    constructs) and to make equality type-safe.
    """

    def __init__(self, groups=None, request=None, uid=None, username=None):
        """
        V1alpha1CertificateSigningRequestSpec - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> swagger type, consumed by generic (de)serializers.
        self.swagger_types = {
            'groups': 'list[str]',
            'request': 'str',
            'uid': 'str',
            'username': 'str'
        }
        # Attribute name -> JSON key in the API payload.
        self.attribute_map = {
            'groups': 'groups',
            'request': 'request',
            'uid': 'uid',
            'username': 'username'
        }
        # Assigning the private fields directly (as the generated code does)
        # deliberately bypasses the `request` setter's None-check, so a spec
        # may be constructed empty and populated later.
        self._groups = groups
        self._request = request
        self._uid = uid
        self._username = username

    @property
    def groups(self):
        """
        Gets the groups of this V1alpha1CertificateSigningRequestSpec.

        :return: The groups of this V1alpha1CertificateSigningRequestSpec.
        :rtype: list[str]
        """
        return self._groups

    @groups.setter
    def groups(self, groups):
        """
        Sets the groups of this V1alpha1CertificateSigningRequestSpec.

        :param groups: The groups of this V1alpha1CertificateSigningRequestSpec.
        :type: list[str]
        """
        self._groups = groups

    @property
    def request(self):
        """
        Gets the request of this V1alpha1CertificateSigningRequestSpec.
        Base64-encoded PKCS#10 CSR data

        :return: The request of this V1alpha1CertificateSigningRequestSpec.
        :rtype: str
        """
        return self._request

    @request.setter
    def request(self, request):
        """
        Sets the request of this V1alpha1CertificateSigningRequestSpec.
        Base64-encoded PKCS#10 CSR data

        :param request: The request of this V1alpha1CertificateSigningRequestSpec.
        :type: str
        """
        # `request` is the only mandatory field of the spec.
        if request is None:
            raise ValueError("Invalid value for `request`, must not be `None`")
        self._request = request

    @property
    def uid(self):
        """
        Gets the uid of this V1alpha1CertificateSigningRequestSpec.

        :return: The uid of this V1alpha1CertificateSigningRequestSpec.
        :rtype: str
        """
        return self._uid

    @uid.setter
    def uid(self, uid):
        """
        Sets the uid of this V1alpha1CertificateSigningRequestSpec.

        :param uid: The uid of this V1alpha1CertificateSigningRequestSpec.
        :type: str
        """
        self._uid = uid

    @property
    def username(self):
        """
        Gets the username of this V1alpha1CertificateSigningRequestSpec.
        Information about the requesting user (if relevant) See user.Info interface for details

        :return: The username of this V1alpha1CertificateSigningRequestSpec.
        :rtype: str
        """
        return self._username

    @username.setter
    def username(self, username):
        """
        Sets the username of this V1alpha1CertificateSigningRequestSpec.
        Information about the requesting user (if relevant) See user.Info interface for details

        :param username: The username of this V1alpha1CertificateSigningRequestSpec.
        :type: str
        """
        self._username = username

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested models.
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: val.to_dict() if hasattr(val, "to_dict") else val
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # BUG FIX: the generated code compared self.__dict__ == other.__dict__
        # unconditionally, which raised AttributeError for objects without a
        # __dict__ (e.g. strings) instead of simply being unequal.
        if not isinstance(other, V1alpha1CertificateSigningRequestSpec):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
2eae9fbaf32e79fe20d43bf241575f4ac6f685d9 | 5e67301779cc6f685018e4db6f2605a306d53be8 | /prognoz/migrations/0026_settlements_description.py | c11b2b4a7dd88b430f10512ad51aa6681f4d1b23 | [] | no_license | avpakh/recon | 905e93374ec73501b5002bf0ef823b00715d7da8 | 4a99c0c2d2644f5847ebdf9bdfd03217cd0269f3 | refs/heads/master | 2021-01-09T06:11:37.389010 | 2017-02-04T19:32:34 | 2017-02-04T19:32:34 | 80,936,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration: adds the `description` field to the
# `settlements` model of the `prognoz` app.
class Migration(migrations.Migration):
# Must be applied after migration 0025 of `prognoz`.
dependencies = [
('prognoz', '0025_auto_20151110_0727'),
]
operations = [
migrations.AddField(
model_name='settlements',
name='description',
# verbose_name is the UTF-8-encoded Russian for "Risk description".
# NOTE(review): default=1 (an int) on a CharField looks wrong, but
# migrations are a historical record — if it matters, correct the model
# and generate a new migration rather than editing this one.
field=models.CharField(default=1, max_length=60, verbose_name=b'\xd0\x9e\xd0\xbf\xd0\xb8\xd1\x81\xd0\xb0\xd0\xbd\xd0\xb8\xd0\xb5 \xd1\x80\xd0\xb8\xd1\x81\xd0\xba\xd0\xb0'),
preserve_default=False,
),
]
| [
"aliaksandr.pakhomau@gmail.com"
] | aliaksandr.pakhomau@gmail.com |
9fffd6c798905d34cf98c2c3c44ace889d640fe0 | 88fcb04d4bafb1745ae4b86807b96198d06d6709 | /bigml/constants.py | d68049527e070f3acc840db04fafe34c57d8c299 | [
"Apache-2.0"
] | permissive | gnib/python | dad9501460a866a9cfa23dfe581d89bd03ca1a5d | 185fd030706992766f54cc7ec5e914df57f5a29f | refs/heads/master | 2021-01-03T13:09:10.302254 | 2017-07-28T22:56:57 | 2017-07-28T22:56:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,711 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015-2017 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common auxiliary constants for all resources
"""
import re
# Basic resources
SOURCE_PATH = 'source'
DATASET_PATH = 'dataset'
MODEL_PATH = 'model'
PREDICTION_PATH = 'prediction'
EVALUATION_PATH = 'evaluation'
ENSEMBLE_PATH = 'ensemble'
BATCH_PREDICTION_PATH = 'batchprediction'
CLUSTER_PATH = 'cluster'
CENTROID_PATH = 'centroid'
BATCH_CENTROID_PATH = 'batchcentroid'
ANOMALY_PATH = 'anomaly'
ANOMALY_SCORE_PATH = 'anomalyscore'
BATCH_ANOMALY_SCORE_PATH = 'batchanomalyscore'
PROJECT_PATH = 'project'
SAMPLE_PATH = 'sample'
CORRELATION_PATH = 'correlation'
STATISTICAL_TEST_PATH = 'statisticaltest'
LOGISTIC_REGRESSION_PATH = 'logisticregression'
ASSOCIATION_PATH = 'association'
ASSOCIATION_SET_PATH = 'associationset'
TOPIC_MODEL_PATH = 'topicmodel'
TOPIC_DISTRIBUTION_PATH = 'topicdistribution'
BATCH_TOPIC_DISTRIBUTION_PATH = 'batchtopicdistribution'
TIME_SERIES_PATH = 'timeseries'
FORECAST_PATH = 'forecast'
SCRIPT_PATH = 'script'
EXECUTION_PATH = 'execution'
LIBRARY_PATH = 'library'
# Resource Ids patterns
ID_PATTERN = '[a-f0-9]{24}'
SHARED_PATTERN = '[a-zA-Z0-9]{24,30}'
SOURCE_RE = re.compile(r'^%s/%s$' % (SOURCE_PATH, ID_PATTERN))
DATASET_RE = re.compile(r'^(public/)?%s/%s$|^shared/%s/%s$' % (
DATASET_PATH, ID_PATTERN, DATASET_PATH, SHARED_PATTERN))
MODEL_RE = re.compile(r'^(public/)?%s/%s$|^shared/%s/%s$' % (
MODEL_PATH, ID_PATTERN, MODEL_PATH, SHARED_PATTERN))
PREDICTION_RE = re.compile(r'^%s/%s$' % (PREDICTION_PATH, ID_PATTERN))
EVALUATION_RE = re.compile(r'^%s/%s$' % (EVALUATION_PATH, ID_PATTERN))
ENSEMBLE_RE = re.compile(r'^%s/%s$' % (ENSEMBLE_PATH, ID_PATTERN))
BATCH_PREDICTION_RE = re.compile(r'^%s/%s$' % (BATCH_PREDICTION_PATH,
ID_PATTERN))
CLUSTER_RE = re.compile(r'^(public/)?%s/%s$|^shared/%s/%s$' % (
CLUSTER_PATH, ID_PATTERN, CLUSTER_PATH, SHARED_PATTERN))
CENTROID_RE = re.compile(r'^%s/%s$' % (CENTROID_PATH, ID_PATTERN))
BATCH_CENTROID_RE = re.compile(r'^%s/%s$' % (BATCH_CENTROID_PATH,
ID_PATTERN))
ANOMALY_RE = re.compile(r'^(public/)?%s/%s$|^shared/%s/%s$' % (
ANOMALY_PATH, ID_PATTERN, ANOMALY_PATH, SHARED_PATTERN))
ANOMALY_SCORE_RE = re.compile(r'^%s/%s$' % (ANOMALY_SCORE_PATH, ID_PATTERN))
BATCH_ANOMALY_SCORE_RE = re.compile(r'^%s/%s$' % (BATCH_ANOMALY_SCORE_PATH,
ID_PATTERN))
PROJECT_RE = re.compile(r'^%s/%s$' % (PROJECT_PATH, ID_PATTERN))
SAMPLE_RE = re.compile(r'^%s/%s|^shared/%s/%s$' % (
SAMPLE_PATH, ID_PATTERN, SAMPLE_PATH, SHARED_PATTERN))
CORRELATION_RE = re.compile(r'^%s/%s|^shared/%s/%s$' % (
CORRELATION_PATH, ID_PATTERN, CORRELATION_PATH, SHARED_PATTERN))
STATISTICAL_TEST_RE = re.compile(r'^%s/%s|^shared/%s/%s$' % \
(STATISTICAL_TEST_PATH, ID_PATTERN, STATISTICAL_TEST_PATH, SHARED_PATTERN))
LOGISTIC_REGRESSION_RE = re.compile(r'^%s/%s|^shared/%s/%s$' % \
(LOGISTIC_REGRESSION_PATH, ID_PATTERN,
LOGISTIC_REGRESSION_PATH, SHARED_PATTERN))
ASSOCIATION_RE = re.compile(r'^%s/%s|^shared/%s/%s$' % \
(ASSOCIATION_PATH, ID_PATTERN, ASSOCIATION_PATH, SHARED_PATTERN))
ASSOCIATION_SET_RE = re.compile(r'^%s/%s$' % \
(ASSOCIATION_SET_PATH, ID_PATTERN))
TOPIC_MODEL_RE = re.compile(r'^(public/)?%s/%s$|^shared/%s/%s$' % (
TOPIC_MODEL_PATH, ID_PATTERN, TOPIC_MODEL_PATH, SHARED_PATTERN))
TOPIC_DISTRIBUTION_RE = re.compile(r'^(public/)?%s/%s$|^shared/%s/%s$' % (
TOPIC_DISTRIBUTION_PATH, ID_PATTERN, TOPIC_DISTRIBUTION_PATH,
SHARED_PATTERN))
BATCH_TOPIC_DISTRIBUTION_RE = re.compile(r'^(public/)?%s/%s$|^shared/%s/%s$' % (
BATCH_TOPIC_DISTRIBUTION_PATH, ID_PATTERN, BATCH_TOPIC_DISTRIBUTION_PATH,
SHARED_PATTERN))
TIME_SERIES_RE = re.compile(r'^%s/%s|^shared/%s/%s$' % \
(TIME_SERIES_PATH, ID_PATTERN, TIME_SERIES_PATH, SHARED_PATTERN))
FORECAST_RE = re.compile(r'^%s/%s$' % \
(FORECAST_PATH, ID_PATTERN))
SCRIPT_RE = re.compile(r'^%s/%s|^shared/%s/%s$' % \
(SCRIPT_PATH, ID_PATTERN, SCRIPT_PATH, SHARED_PATTERN))
EXECUTION_RE = re.compile(r'^%s/%s|^shared/%s/%s$' % \
(EXECUTION_PATH, ID_PATTERN, EXECUTION_PATH, SHARED_PATTERN))
LIBRARY_RE = re.compile(r'^%s/%s|^shared/%s/%s$' % \
(LIBRARY_PATH, ID_PATTERN, LIBRARY_PATH, SHARED_PATTERN))
RESOURCE_RE = {
SOURCE_PATH: SOURCE_RE,
DATASET_PATH: DATASET_RE,
MODEL_PATH: MODEL_RE,
PREDICTION_PATH: PREDICTION_RE,
EVALUATION_PATH: EVALUATION_RE,
ENSEMBLE_PATH: ENSEMBLE_RE,
BATCH_PREDICTION_PATH: BATCH_PREDICTION_RE,
CLUSTER_PATH: CLUSTER_RE,
CENTROID_PATH: CENTROID_RE,
BATCH_CENTROID_PATH: BATCH_CENTROID_RE,
ANOMALY_PATH: ANOMALY_RE,
ANOMALY_SCORE_PATH: ANOMALY_SCORE_RE,
BATCH_ANOMALY_SCORE_PATH: BATCH_ANOMALY_SCORE_RE,
PROJECT_PATH: PROJECT_RE,
SAMPLE_PATH: SAMPLE_RE,
CORRELATION_PATH: CORRELATION_RE,
STATISTICAL_TEST_PATH: STATISTICAL_TEST_RE,
LOGISTIC_REGRESSION_PATH: LOGISTIC_REGRESSION_RE,
ASSOCIATION_PATH: ASSOCIATION_RE,
ASSOCIATION_SET_PATH: ASSOCIATION_SET_RE,
TOPIC_MODEL_PATH: TOPIC_MODEL_RE,
TOPIC_DISTRIBUTION_PATH: TOPIC_DISTRIBUTION_RE,
BATCH_TOPIC_DISTRIBUTION_PATH: BATCH_TOPIC_DISTRIBUTION_RE,
TIME_SERIES_PATH: TIME_SERIES_RE,
FORECAST_PATH: FORECAST_RE,
SCRIPT_PATH: SCRIPT_RE,
EXECUTION_PATH: EXECUTION_RE,
LIBRARY_PATH: LIBRARY_RE}
RENAMED_RESOURCES = {
BATCH_PREDICTION_PATH: 'batch_prediction',
BATCH_CENTROID_PATH: 'batch_centroid',
ANOMALY_SCORE_PATH: 'anomaly_score',
BATCH_ANOMALY_SCORE_PATH: 'batch_anomaly_score',
STATISTICAL_TEST_PATH: 'statistical_test',
LOGISTIC_REGRESSION_PATH: 'logistic_regression',
ASSOCIATION_SET_PATH: 'association_set',
TOPIC_MODEL_PATH: 'topic_model',
TOPIC_DISTRIBUTION_PATH: 'topic_distribution',
BATCH_TOPIC_DISTRIBUTION_PATH: 'batch_topic_distribution',
TIME_SERIES_PATH: 'time_series'
}
# Resource status codes
# Numeric lifecycle states of a BigML resource; negative values appear to be
# error/synthetic states -- TODO(review): confirm the exact semantics of each
# code against the BigML API documentation.
WAITING = 0
QUEUED = 1
STARTED = 2
IN_PROGRESS = 3
SUMMARIZED = 4
FINISHED = 5
UPLOADING = 6
FAULTY = -1
UNKNOWN = -2
RUNNABLE = -3

# Minimum query string to get model fields
TINY_RESOURCE = "full=false"
| [
"merce@bigml.com"
] | merce@bigml.com |
d9fc547ef77070538f454815fa5e74d3c62c1312 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03611/s329630070.py | 2da7de28a0f7899a21bdeddfa3f4520533bf8ad0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | import sys
from collections import Counter
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
MOD = 1000000007
def main():
    """Print the largest group size obtainable when each value may shift by 1.

    Reads ``N`` followed by ``N`` integers from stdin; each integer casts a
    vote for the buckets ``a-1``, ``a`` and ``a+1``, and the maximum bucket
    count is printed.
    """
    _, *values = map(int, read().split())
    votes = Counter()
    for value in values:
        # Each value contributes to its own bucket and both neighbours.
        votes.update((value - 1, value, value + 1))
    print(max(votes.values()))
    return
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e9165487a666303d57c21ab719a94938efc5b12d | 63d37b990c194a68fbb8d2d288de5faae374ed26 | /main/migrations/0010_photo_isavatar.py | c396985b39a95cdfa776c6e3981996e77b117cce | [] | no_license | Dostoyewski/MH_Back | bf16cd83ff5103e65a5f3fe7d866a2b6dbe66624 | 6de68e1a14643a23e268f1c313224cf7bea89c75 | refs/heads/master | 2022-03-30T18:04:51.340938 | 2020-04-05T08:06:04 | 2020-04-05T08:06:04 | 251,396,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # Generated by Django 2.2.1 on 2020-04-04 19:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0009_hero_stage'),
]
operations = [
migrations.AddField(
model_name='photo',
name='isAvatar',
field=models.BooleanField(default=False),
),
]
| [
"dostoyewski@yandex.ru"
] | dostoyewski@yandex.ru |
350249cd29157e132cd1e6549a7530a9fc74ae0f | da386754e12ed3e251d5fb9091d9416b9f97edc7 | /examples/large_deformation/active_fibres.py | c4e19db861b1c9544510221fd8f0f2e5268df656 | [
"BSD-3-Clause"
] | permissive | nasseralkmim/sfepy | 5b5642f084b62632c1ca48035e510f27728e25ab | 647f1754bcd4fd103cd19a03ed36cb10ebc8fd15 | refs/heads/master | 2020-04-06T04:57:21.589694 | 2016-08-03T12:38:31 | 2016-08-03T12:38:31 | 65,736,316 | 2 | 1 | null | 2016-08-15T13:58:01 | 2016-08-15T13:58:01 | null | UTF-8 | Python | false | false | 6,335 | py | # -*- coding: utf-8 -*-
r"""
Nearly incompressible hyperelastic material model with active fibres.
Large deformation is described using the total Lagrangian formulation.
Models of this kind can be used in biomechanics to model biological
tissues, e.g. muscles.
Find :math:`\ul{u}` such that:
.. math::
\intl{\Omega\suz}{} \left( \ull{S}\eff(\ul{u})
+ K(J-1)\; J \ull{C}^{-1} \right) : \delta \ull{E}(\ul{v}) \difd{V}
= 0
\;, \quad \forall \ul{v} \;,
where
.. list-table::
:widths: 20 80
* - :math:`\ull{F}`
- deformation gradient :math:`F_{ij} = \pdiff{x_i}{X_j}`
* - :math:`J`
- :math:`\det(F)`
* - :math:`\ull{C}`
- right Cauchy-Green deformation tensor :math:`C = F^T F`
* - :math:`\ull{E}(\ul{u})`
- Green strain tensor :math:`E_{ij} = \frac{1}{2}(\pdiff{u_i}{X_j} +
\pdiff{u_j}{X_i} + \pdiff{u_m}{X_i}\pdiff{u_m}{X_j})`
* - :math:`\ull{S}\eff(\ul{u})`
- effective second Piola-Kirchhoff stress tensor
The effective stress :math:`\ull{S}\eff(\ul{u})` incorporates also the
effects of the active fibres in two preferential directions:
.. math::
\ull{S}\eff(\ul{u}) = \mu J^{-\frac{2}{3}}(\ull{I}
- \frac{1}{3}\tr(\ull{C}) \ull{C}^{-1})
+ \sum_{k=1}^2 \tau^k \ull{\omega}^k
\;.
The first term is the neo-Hookean term and the sum add contributions of
the two fibre systems. The tensors :math:`\ull{\omega}^k =
\ul{d}^k\ul{d}^k` are defined by the fibre system direction vectors
:math:`\ul{d}^k` (unit).
For the one-dimensional tensions :math:`\tau^k` holds simply (:math:`^k`
omitted):
.. math::
\tau = A f_{\rm max} \exp{\left\{-(\frac{\epsilon - \varepsilon_{\rm
opt}}{s})^2\right\}} \mbox{ , } \epsilon = \ull{E} : \ull{\omega}
\;.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as nm
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/cylinder.mesh'
vf_matrix = 0.5
vf_fibres1 = 0.2
vf_fibres2 = 0.3
options = {
'nls' : 'newton',
'ls' : 'ls',
'ts' : 'ts',
'save_steps' : -1,
'post_process_hook' : 'stress_strain',
}
fields = {
'displacement': (nm.float64, 3, 'Omega', 1),
}
materials = {
'solid' : ({
'K' : vf_matrix * 1e3, # bulk modulus
'mu' : vf_matrix * 20e0, # shear modulus of neoHookean term
},),
'f1' : 'get_pars_fibres1',
'f2' : 'get_pars_fibres2',
}
def get_pars_fibres(ts, coors, mode=None, which=0, vf=1.0, **kwargs):
    """
    Return the active-fibre material parameters in the quadrature points.

    Parameters
    ----------
    ts : TimeStepper
        Time stepping info; ``ts.nt`` (normalized time) drives the periodic
        activation of the fibres.
    coors : array_like
        Physical coordinates of the points the parameters are defined in;
        only the number of points is used.
    mode : 'qp' or 'special'
        Call mode; anything other than ``'qp'`` is ignored (returns None).
    which : int
        Fibre system id (0 or 1).
    vf : float
        Fibre system volume fraction, scales the maximal force.
    """
    if mode != 'qp': return

    # Activation-law constants shared by both fibre systems.
    peak_force = 10.0
    optimal_strain = 0.01
    spread = 1.0

    phase = ts.nt * 2.0 * nm.pi
    if which == 0: # system 1, x-aligned fibres
        direction = nm.array([1.0, 0.0, 0.0], dtype=nm.float64)
        activation = 0.5 * (1.0 + nm.sin(phase - (0.5 * nm.pi)))
    elif which == 1: # system 2, y-aligned fibres, half a period out of phase
        direction = nm.array([0.0, 1.0, 0.0], dtype=nm.float64)
        activation = 0.5 * (1.0 + nm.sin(phase + (0.5 * nm.pi)))
    else:
        raise ValueError('unknown fibre system! (%d)' % which)

    direction = direction.reshape((3, 1))
    direction /= nm.linalg.norm(direction)

    print(activation)

    n_point = coors.shape[0]

    def tiled(value):
        # One copy of ``value`` per evaluation point, shape (n_point, ., .).
        return nm.tile(value, (n_point, 1, 1))

    return {
        'fmax' : tiled(vf * peak_force),
        'eps_opt' : tiled(optimal_strain),
        's' : tiled(spread),
        'fdir' : tiled(direction),
        'act' : tiled(activation),
    }
functions = {
'get_pars_fibres1' : (lambda ts, coors, mode=None, **kwargs:
get_pars_fibres(ts, coors, mode=mode, which=0,
vf=vf_fibres1, **kwargs),),
'get_pars_fibres2' : (lambda ts, coors, mode=None, **kwargs:
get_pars_fibres(ts, coors, mode=mode, which=1,
vf=vf_fibres2, **kwargs),),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < 0.001)', 'facet'),
'Right' : ('vertices in (x > 0.099)', 'facet'),
}
##
# Dirichlet BC.
ebcs = {
'l' : ('Left', {'u.all' : 0.0}),
}
##
# Balance of forces.
integral_1 = {
'name' : 'i',
'order' : 1,
}
equations = {
'balance'
: """dw_tl_he_neohook.i.Omega( solid.mu, v, u )
+ dw_tl_bulk_penalty.i.Omega( solid.K, v, u )
+ dw_tl_fib_a.i.Omega( f1.fmax, f1.eps_opt, f1.s, f1.fdir, f1.act,
v, u )
+ dw_tl_fib_a.i.Omega( f2.fmax, f2.eps_opt, f2.s, f2.fdir, f2.act,
v, u )
= 0""",
}
def stress_strain(out, problem, state, extend=False):
    """
    Post-process hook: evaluate element-averaged strain and stresses and add
    them to the output dictionary.

    Parameters
    ----------
    out : dict
        Output dictionary; entries 'green_strain', 'neohook_stress' and
        'bulk_stress' are added, each wrapped in a cell-mode Struct.
    problem : Problem
        The sfepy problem instance used for the term evaluations.
    state : State
        Unused here; part of the post_process_hook signature.
    extend : bool
        Unused here; part of the post_process_hook signature.
    """
    from sfepy.base.base import Struct

    # (key, term, term_mode) triples; the term strings must match the ones
    # used in the equations above.
    evaluations = (
        ('green_strain', 'dw_tl_he_neohook.i.Omega( solid.mu, v, u )',
         'strain'),
        ('neohook_stress', 'dw_tl_he_neohook.i.Omega( solid.mu, v, u )',
         'stress'),
        ('bulk_stress', 'dw_tl_bulk_penalty.i.Omega( solid.K, v, u )',
         'stress'),
    )
    for key, term, term_mode in evaluations:
        data = problem.evaluate(term, mode='el_avg', term_mode=term_mode)
        out[key] = Struct(name='output_data', mode='cell', data=data,
                          dofs=None)

    return out
##
# Solvers etc.
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 7,
'eps_a' : 1e-10,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp': 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
}
solver_2 = {
'name' : 'ts',
'kind' : 'ts.simple',
't0' : 0,
't1' : 1,
'dt' : None,
'n_step' : 21, # has precedence over dt!
}
| [
"cimrman3@ntc.zcu.cz"
] | cimrman3@ntc.zcu.cz |
fba58c1d1d2a511970d286307437a86f18d54932 | 8a3f10cd8f178e8452b0f1ecf747bec40ee87b31 | /orders.py | 2d627de6e0a1a078793a69db21cc83a1d14f48eb | [] | no_license | Panda0229/flasky | 70887be89b182f458ed6f1434e2a93afac048899 | d6adb2dfe998a6836bc24443d23d3cb4ed8b8e58 | refs/heads/master | 2020-12-02T18:50:41.230430 | 2019-12-31T13:04:24 | 2019-12-31T13:04:24 | 231,085,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | from flask import Blueprint
# Create a Blueprint object. A blueprint is an abstraction for a small module
# of the application; the left-hand side of the assignment is the blueprint
# object, the first argument is the name registered for it (they may differ).
app_orders = Blueprint("app_orders", __name__)
@app_orders.route("/get_orders")
def get_orders():
return "get orders page"
@app_orders.route("/post_orders")
def post_orders():
return "post orders page"
| [
"zhanghaining0229@163.com"
] | zhanghaining0229@163.com |
b73253fd7fdc82aacabc674fd9ab679a7a0f6a51 | 1daf07aa6e1a602d69ab2a786dca43d093803a04 | /module/module_using_sys.py | ecbb9b3638943e95bf22a6a2a0bf07396123cacf | [] | no_license | luteresa/python | 89491c90788ccfcd49f554cd8e8db8f9d0ab715f | 652dc34990f179094df64ef760fc03cc980556cd | refs/heads/master | 2020-12-02T16:15:20.813439 | 2017-07-26T08:52:07 | 2017-07-26T08:52:07 | 96,525,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import sys
import os
print('the command line arguments are:')
for i in sys.argv:
print(i)
print('\n\nThe PYTHONPATH is', sys.path,'\n')
print(os.getcwd())
for item in sys.path:
print(item)
from math import sqrt
print('Squre root of 16 is ',sqrt(16))
| [
"luteresa@163.com"
] | luteresa@163.com |
868cbe6be728a3cb624a86979fd0c364286f5f63 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayMerchantComplainReconciliationCloseModel.py | 0ef574bea5b34d603c969410707030ff70deb5cb | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,508 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMerchantComplainReconciliationCloseModel(object):
    """Request model for closing a merchant complaint reconciliation.

    Carries two optional string parameters, ``merchant_id`` and
    ``merchant_type``, and converts between the object form and the plain
    dict form used by the Alipay API transport layer.
    """

    def __init__(self):
        self._merchant_id = None
        self._merchant_type = None

    @property
    def merchant_id(self):
        return self._merchant_id

    @merchant_id.setter
    def merchant_id(self, value):
        self._merchant_id = value

    @property
    def merchant_type(self):
        return self._merchant_type

    @merchant_type.setter
    def merchant_type(self, value):
        self._merchant_type = value

    def to_alipay_dict(self):
        """Serialize the truthy fields into a plain dict for the request."""
        params = dict()
        for attr in ('merchant_id', 'merchant_type'):
            value = getattr(self, attr)
            if not value:
                # Unset (None) and empty values are omitted from the payload.
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[attr] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; return None for falsy input."""
        if not d:
            return None
        model = AlipayMerchantComplainReconciliationCloseModel()
        for attr in ('merchant_id', 'merchant_type'):
            if attr in d:
                setattr(model, attr, d[attr])
        return model
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
168476b32a6333cbb5bb67e465d815cd3a211e1e | 76c8a2593316a74078e5ebe3c280d393b058ff67 | /vai/commands/BreakLineCommand.py | f5ae1c5829502cfe19b2fd6aa2d35ee9be32a0ea | [] | no_license | gavd89/vai | b7f746c3ba31397e8d85f477af9b9b71d01795fb | afa3a31b74ee81f9be8ab2c06cd8bdaebae1baad | refs/heads/master | 2021-01-16T22:04:05.131998 | 2014-10-31T22:35:37 | 2014-10-31T22:35:37 | 26,130,434 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,830 | py | from .BufferCommand import BufferCommand
from .CommandResult import CommandResult
from .NewLineCommand import NewLineCommand
from .NewLineAfterCommand import NewLineAfterCommand
from ..models.TextDocument import LineMeta
class BreakLineCommand(BufferCommand):
    """Editor command: break the current line at the cursor position.

    Three cases are handled:
    - cursor at end of line: delegates to NewLineAfterCommand;
    - cursor at column 1: delegates to NewLineCommand;
    - cursor mid-line: splits the line, auto-indenting the new line to match
      the leading spaces of the original line.
    Undo reverses whichever path execute() took.
    """

    def execute(self):
        cursor = self._cursor
        document = self._document
        pos = cursor.pos
        # Remember the cursor so undo() can restore it.
        self.saveCursorPos()
        if pos[1] == document.lineLength(pos[0]):
            # End of line: equivalent to opening a new line below; delegate
            # and keep the sub-command around so undo() can forward to it.
            command = NewLineAfterCommand(self._buffer)
            result = command.execute()
            if result.success:
                self._sub_command = command
            return result
        if pos[1] == 1:
            # Start of line: insert an empty line and move the cursor down to
            # the (shifted) original line.
            command = NewLineCommand(self._buffer)
            result = command.execute()
            if result.success:
                self._sub_command = command
                cursor.toPos((pos[0]+1, 1))
            return result
        # Mid-line split: snapshot the line for undo, then break it.
        self.saveLineMemento(pos[0], BufferCommand.MEMENTO_REPLACE)
        current_text = document.lineText(pos[0])
        # Number of leading spaces, reproduced on the new line (auto-indent).
        current_indent = len(current_text) - len(current_text.lstrip(' '))
        document.breakLine(pos)
        document.insertChars( (pos[0]+1, 1), ' '*current_indent )
        # Place the cursor after the inserted indentation.
        cursor.toPos((pos[0]+1, current_indent+1))
        line_meta = document.lineMeta(pos[0])
        # Mark the split line modified only if it has no change status yet
        # (e.g. a line already marked "added" keeps that status).
        if line_meta.get(LineMeta.Change) == None:
            document.updateLineMeta(pos[0], {LineMeta.Change: "modified"})
        document.updateLineMeta(pos[0]+1, {LineMeta.Change: "added"})
        return CommandResult(success=True, info=None)

    def undo(self):
        self.restoreCursorPos()
        if self._sub_command is not None:
            # execute() delegated to a sub-command; let it undo itself.
            self._sub_command.undo()
            self._sub_command = None
            return
        # Mid-line split path: restore the original line text and drop the
        # line that the split created below it.
        self.restoreLineMemento()
        self._document.deleteLine(self._cursor.pos[0]+1)
| [
"stefano.borini@gmail.com"
] | stefano.borini@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.