blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a44003bb5206592292825279248f7d3fb178359c | 1d7bb0175edf39a04ca665c46e80fc6da8085747 | /trash/IdealGasLaw.py | 8a3b41b715b6959d03f06c8a3bfec6f7fc89ac70 | [] | no_license | ElenaGramellini/PlayingWithCEvNS | 211d54514c0fab2358ea8bc1058fe093303c366f | fb3500c2b25bdbc3d81b12d19da8d1750989f412 | refs/heads/master | 2020-07-31T16:56:53.649085 | 2019-09-24T19:46:44 | 2019-09-24T19:46:44 | 210,683,533 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | ###############################
### Important notes ###
### the v is in mm/microsec ###
### the E is in V/cm ###
###############################
import argparse
import math
# Ideal-gas-law constants (neon, SI units unless noted).
R = 8.314 # universal gas constant [m^3 Pa / (K mol)]
#m = 1000 # gr  (unused: the mass is passed to V2P explicitly instead)
M = 20.1797 # molar mass of neon [g/mol]
pa2Atm = 9.86923e-6 # conversion factor: pascal -> atmosphere
def V2P(V, T,m):
    """Return the pressure (in atm) of m grams of neon occupying volume V.

    Implements p = (m/M) R T / V, converted from Pa to atm.

    :param V: volume in m^3
    :param T: temperature in kelvin
    :param m: mass of gas in grams
    :return: pressure in atmospheres
    """
    p = pa2Atm*m*R*T/(V*M)
    return p
import matplotlib.pyplot as plt
import numpy as np
# Plot pressure-vs-volume curves for neon at a fixed temperature and two masses.
fig1 = plt.figure(facecolor='white')
# Volumes from 0.1 to 10 m^3 in steps of 0.1.
t1 = np.arange(0.1, 10.0, 0.1)
# Vectorize V2P so it applies elementwise over the volume array.
f2 = np.vectorize(V2P)
line1 = plt.plot(t1, f2(t1,100.,1000),label="T = 100 K, m = 1 Kg, 49.5 mols",linewidth=2.0)
line2 = plt.plot(t1, f2(t1,100.,10000),label="T = 100 K, m = 10 Kg, 495 mols",linewidth=2.0)
#line3 = plt.plot(t1, f2(t1,100.,100000),label="T = 100 K, m = 100 Kg, 4950 mols",linewidth=2.0)
#line2 = plt.plot(t1, f2(t1,200.,1),label="T = 200 K, m = 1 Kg, 49.5 mols",linewidth=2.0)
#line3 = plt.plot(t1, f2(t1,300.),label="T = 300 K, m = 1 Kg, 49.5 mols",linewidth=2.0)
#line4 = plt.plot(t1, f2(t1,93.0),label="T = 93.0 K",linewidth=2.0)
# Anchor the legend in figure coordinates so it sits inside the plotting area.
plt.legend(bbox_to_anchor=(0.8, 0.5),
           bbox_transform=plt.gcf().transFigure)
plt.grid(True)
plt.title('Ideal Gas Law Neon, molar Mass 20.2 g/mol')
font = {'family': 'serif',
        'color': 'black',
        'weight': 'normal',
        'size': 30,
        }
# Annotate the plot with the governing equation.
plt.text(1, 12, r'$PV = \frac{m}{M} RT$', fontdict=font)
plt.xlabel('Volume [m^3]')
plt.ylabel('Pressure [atm] ')
plt.show()
#plt.plot(t1, E2v(t1,87), 'bo')
| [
"elena.gramellini@yale.edu"
] | elena.gramellini@yale.edu |
ff701be8781c6fbba6a1c24f8f2dbb0e157d6411 | 455a501b6e7579a8d150d40645311433bf22d3c4 | /Day 17/q3.py | 20189d7217d9c34eb7311662bc29ede4156da973 | [] | no_license | Infinidrix/competitive-programming | e77e442b73590b9bf42a40832323d87f57bbbdf4 | 6cf7a9de7d076405990d497871bb2ccfe04fc6f3 | refs/heads/master | 2023-02-09T04:02:31.389806 | 2023-02-02T11:10:10 | 2023-02-02T11:10:10 | 222,917,959 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | def substring_adder(string, lookup):
index = 0
subsum = 0
for i in range(len(string)):
if string[i] in lookup:
index += 1
else:
subsum += (index)*(index+1)/2
index = 0
return int(subsum + (index) * (index + 1) / 2)
# Driver: read a count (unused), the text, and the allowed characters,
# then print how many substrings use only allowed characters.
no_uses = input()
string = input()
lookup = input().split()
print(substring_adder(string, lookup))
"biruksolomon11@gmail.com"
] | biruksolomon11@gmail.com |
71c0a2e9e86e5b8aff5a4085668128ef7b76a6eb | d64ff38360527cb1a1aa45ba2869a95cdf33ea52 | /src/vumi/webapp/api/urls.py | 69cb428ce821bf2cda3b388b61e7e337c4f7b611 | [] | no_license | smn/richmond | 9d3d8b3e52d89a71181300149f15116e0eec7e64 | 2593293ef5b8fbd659da12ff46c5b6aad1764add | refs/heads/master | 2020-05-20T12:36:59.670573 | 2010-11-15T20:45:26 | 2010-11-15T20:45:26 | 629,376 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | from django.conf.urls.defaults import *
from piston.resource import Resource
from piston.authentication import HttpBasicAuthentication
from vumi.webapp.api import handlers
from vumi.webapp.api import views
# All API handlers share HTTP Basic authentication against the "Vumi" realm.
ad = {'authentication': HttpBasicAuthentication(realm="Vumi")}
# Piston resources wrapping the REST handlers.
url_callback_resource = Resource(handler=handlers.URLCallbackHandler, **ad)
conversation_resource = Resource(handler=handlers.ConversationHandler, **ad)
# Core API endpoints: conversations, callback management, example callback page.
urlpatterns = patterns('',
    (r'^conversation\.yaml$', conversation_resource, {
        'emitter_format': 'yaml'
    }, 'conversation'),
    (r'^account/callbacks\.json$', url_callback_resource, {}, 'url-callbacks-list'),
    (r'^account/callbacks/(?P<callback_id>\d+)\.json$', url_callback_resource, {}, 'url-callback'),
    (r'^callback\.html$', views.example_sms_callback, {}, 'sms-example-callback'),
)
# gateways
# Per-provider SMS gateway namespaces, each delegating to its own urlconf.
urlpatterns += patterns('',
    (r'^sms/clickatell/',
        include('vumi.webapp.api.gateways.clickatell.urls',
            namespace='clickatell')),
    (r'^sms/opera/',
        include('vumi.webapp.api.gateways.opera.urls',
            namespace='opera')),
    (r'^sms/e-scape/',
        include('vumi.webapp.api.gateways.e_scape.urls',
            namespace='e-scape')),
    (r'^sms/techsys/',
        include('vumi.webapp.api.gateways.techsys.urls',
            namespace='techsys')),
)
"simon@soocial.com"
] | simon@soocial.com |
422244505be179d682f30089b16d093e458be9c7 | 06e897ed3b6effc280eca3409907acc174cce0f5 | /plugins/filetime_from_git/content_adapter.py | e3a951272c66b56efff2754d9c4969e311d3d9ae | [
"AGPL-3.0-only",
"MIT"
] | permissive | JackMcKew/jackmckew.dev | ae5a32da4f1b818333ae15c6380bca1329d38f1e | b5d68070b6f15677a183424c84e30440e128e1ea | refs/heads/main | 2023-09-02T14:42:19.010294 | 2023-08-15T22:08:19 | 2023-08-15T22:08:19 | 213,264,451 | 15 | 8 | MIT | 2023-02-14T21:50:28 | 2019-10-07T00:18:15 | JavaScript | UTF-8 | Python | false | false | 2,755 | py | # -*- coding: utf-8 -*-
"""
Wraps a content object to provide some git information
"""
import logging
from pelican.utils import memoized
from .git_wrapper import git_wrapper
DEV_LOGGER = logging.getLogger(__name__)
class GitContentAdapter(object):
    """
    Wraps a content object to expose its git history: commit list,
    first/last commit dates, and the original filename across renames.
    """
    def __init__(self, content):
        self.content = content
        self.git = git_wrapper(".")
        # Timezone name used when rendering commit dates.
        self.tz_name = content.settings.get("TIMEZONE", None)
        # Whether history lookups should follow file renames.
        self.follow = content.settings["GIT_HISTORY_FOLLOWS_RENAME"]
    @memoized
    def is_committed(self):
        """
        Return True when the source file has at least one commit.
        """
        return len(self.get_commits()) > 0
    @memoized
    def is_modified(self):
        """
        Has content been modified since the last commit?
        """
        return self.git.is_file_modified(self.content.source_path)
    @memoized
    def is_managed_by_git(self):
        """
        Is content stored in a file managed by git?
        """
        return self.git.is_file_managed_by_git(self.content.source_path)
    @memoized
    def get_commits(self):
        """
        Get all commits involving this filename
        :returns: List of commits newest to oldest
        """
        if not self.is_managed_by_git():
            return []
        return self.git.get_commits(self.content.source_path, self.follow)
    @memoized
    def get_oldest_commit(self):
        """
        Get oldest commit involving this file (follows renames when
        GIT_HISTORY_FOLLOWS_RENAME is set)
        :returns: Oldest commit
        """
        return self.git.get_commits(self.content.source_path, self.follow)[-1]
    @memoized
    def get_newest_commit(self):
        """
        Get newest commit involving this file (renames are never followed
        here: the newest commit is the same either way)
        :returns: Newest commit
        """
        return self.git.get_commits(self.content.source_path, follow=False)[0]
    @memoized
    def get_oldest_filename(self):
        """
        Get the original filename of this content. Implies follow
        """
        commit_and_name_iter = self.git.get_commits_and_names_iter(
            self.content.source_path
        )
        _commit, name = next(commit_and_name_iter)
        return name
    @memoized
    def get_oldest_commit_date(self):
        """
        Get datetime of oldest commit involving this file
        :returns: Datetime of oldest commit
        """
        oldest_commit = self.get_oldest_commit()
        return self.git.get_commit_date(oldest_commit, self.tz_name)
    @memoized
    def get_newest_commit_date(self):
        """
        Get datetime of newest commit involving this file
        :returns: Datetime of newest commit
        """
        newest_commit = self.get_newest_commit()
        return self.git.get_commit_date(newest_commit, self.tz_name)
| [
"jackmckew2@gmail.com"
] | jackmckew2@gmail.com |
f82fb02818c9fd23a4cf44fa31f43ad48cd5a419 | d3e6d6555b0314936902727af36de2f1b7432bf8 | /h-index/h-index.py | 96658ad52e376ae31f028b62e5323dcc366f65b1 | [] | no_license | fly2rain/LeetCode | 624b1e06e1aa3174dfb5c81834b58cc8fd7ad073 | 4ddb5a051c6e2051f016a675fd2f5d566c800c2a | refs/heads/master | 2021-01-18T03:12:22.402044 | 2015-12-28T04:31:19 | 2015-12-28T04:31:19 | 85,842,050 | 0 | 1 | null | 2017-03-22T15:05:20 | 2017-03-22T15:05:19 | null | UTF-8 | Python | false | false | 600 | py |
class Solution(object):
    def hIndex(self, citations):
        """
        Return the h-index of a list of citation counts.

        The h-index is the largest h such that h papers each have at
        least h citations. Note: sorts ``citations`` in place (ascending),
        matching the original implementation's side effect.

        :type citations: List[int]
        :rtype: int
        """
        citations.sort()
        # Walk from the most-cited paper down, growing h while the current
        # paper still has more than h citations.
        h = 0
        for count in reversed(citations):
            if count <= h:
                break
            h += 1
        return h
# Smoke tests (Python 2 print statements).
if __name__ == '__main__':
    print Solution().hIndex([3,0,6,1,5])
    print Solution().hIndex([0,0,0])
    print Solution().hIndex([0,6,5])
    print Solution().hIndex([1])
    print Solution().hIndex([1, 1])
    print Solution().hIndex([])
| [
"xuzheng1111@gmail.com"
] | xuzheng1111@gmail.com |
f7a384c4dd7aed86157ed6c844fbe54c92a49c25 | 221cada2354556fbb969f25ddd3079542904ef5d | /Leetcode/109.py | fb737687d1edaa018d6025e08430617852fcba01 | [] | no_license | syzdemonhunter/Coding_Exercises | 4b09e1a7dad7d1e3d4d4ae27e6e006732ffdcb1d | ca71572677d2b2a2aed94bb60d6ec88cc486a7f3 | refs/heads/master | 2020-05-24T11:19:35.019543 | 2019-11-22T20:08:32 | 2019-11-22T20:08:32 | 187,245,394 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | # https://leetcode.com/problems/convert-sorted-list-to-binary-search-tree/
# T: O(n)
# S: O(n)
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def sortedListToBST(self, head: ListNode) -> TreeNode:
        """Build a height-balanced BST from an ascending-sorted linked list."""
        if not head:
            return None
        return self.to_bst(head, None)
    def to_bst(self, head, tail):
        """Recursively convert the half-open segment [head, tail) to a BST.

        O(n log n) overall: each level re-scans its segment to find the
        middle node, which becomes the subtree root.
        """
        if head == tail:
            return None
        # Fast/slow pointers: when fast reaches tail, slow is at the middle.
        slow = head
        fast = head
        while fast != tail and fast.next != tail:
            fast = fast.next.next
            slow = slow.next
        # Middle node becomes the root; left half is [head, slow),
        # right half is [slow.next, tail).
        root = TreeNode(slow.val)
        root.left = self.to_bst(head, slow)
        root.right = self.to_bst(slow.next, tail)
        return root
"syzuser60@gmail.com"
] | syzuser60@gmail.com |
31503b0c217f37c86eff34f3a100b8f183473606 | 8f75f8e91bb379cc05eded1a89a6c2f550bae6e6 | /jumpscale/data/idgenerator/idgenerator.py | dd7046fca43082a6850b88b15924070f380dcd47 | [] | no_license | despiegk/js-ng | 24c09f653ec4abfb1b997811a17c254eede89304 | e796f6ae31363e2f6daadd5ad377b5d3b116f657 | refs/heads/master | 2021-05-23T01:48:17.168454 | 2019-08-28T19:48:05 | 2019-08-28T19:48:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | import random
import string
import uuid
def random_int(from_, to):
    """Return a uniformly random integer N with from_ <= N <= to."""
    # randrange's upper bound is exclusive, hence the +1; this draws the
    # same value from the RNG stream as random.randint(from_, to).
    return random.randrange(from_, to + 1)
def incrementor_id():
    """Return a monotonically increasing id (not implemented).

    :raises NotImplementedError: always, until a backend exists.
    """
    # Intended implementation: redis INCR (atomic server-side counter).
    raise NotImplementedError()
def guid():
    """Return a random version-4 UUID as its canonical 36-character string."""
    return "{0}".format(uuid.uuid4())
def nfromchoices(n, choices):
    """Return a string of ``n`` characters drawn uniformly from ``choices``."""
    picks = []
    for _ in range(n):
        picks.append(random.choice(choices))
    return "".join(picks)
def chars(nchars):
    """Return ``nchars`` random characters from [a-zA-Z0-9]."""
    alphabet = string.ascii_letters + string.digits
    # Inlined join over random.choice: draws the identical sequence the
    # nfromchoices helper would.
    return "".join([random.choice(alphabet) for _ in range(nchars)])
def nbytes(nbytes):
    """Return a bytearray holding ``nbytes`` uniformly random values 0..255."""
    # random.randint here is exactly what the random_int helper delegated to,
    # so the RNG stream is consumed identically.
    return bytearray(random.randint(0, 255) for _ in range(nbytes))
def password(nchars):
    """Return ``nchars`` random characters drawn from all printable chars.

    NOTE: uses the non-cryptographic ``random`` module, like the rest of
    this generator family.
    """
    pool = string.printable
    return "".join([random.choice(pool) for _ in range(nchars)])
def capnp_id():
    """
    Generates a valid id for a capnp schema.

    Capnp schema ids are 64-bit values with the most significant bit set
    (the bitwise OR mirrors the validity check in capnp/parser.c++), so the
    result always lies in [2**63, 2**64) and is returned as a hex string.
    """
    # randint's upper bound is inclusive, so it must be 2 ** 64 - 1:
    # the previous bound of 2 ** 64 could produce a 65-bit (invalid) id.
    return hex(random.randint(0, 2 ** 64 - 1) | 1 << 63)
"xmonader@gmail.com"
] | xmonader@gmail.com |
2af736a948b077e2294b0cb97cf0ee15aeca7972 | 13ea6fa027c8ae33852bde3335846cdaab78ee71 | /DataScienceWithPython/sample_python_code/statistic/stat7.py | d760c7ef157fc2e0048a71b6ec1b276ab8263ddd | [] | no_license | dmonisankar/pythonworks | c98de04b191135451556ca9d1ee513a0a69f2edb | 4f3a14460272ec959c2f2e6975814d9ac43cb90a | refs/heads/master | 2023-03-31T00:36:46.016403 | 2020-06-11T05:39:36 | 2020-06-11T05:39:36 | 271,455,493 | 0 | 0 | null | 2021-03-20T04:19:45 | 2020-06-11T05:00:00 | Jupyter Notebook | UTF-8 | Python | false | false | 1,132 | py | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
def ecdf(data):
    """Compute the empirical CDF of a one-dimensional set of measurements.

    Returns (x, y) where x holds the sorted values and y[i] is the fraction
    of observations <= x[i], i.e. y runs from 1/n up to 1.
    """
    sorted_vals = np.sort(data)
    count = len(sorted_vals)
    fractions = np.arange(1, count + 1) / count
    return sorted_vals, fractions
# Load the iris data and keep only the versicolor samples' petal lengths.
df = pd.read_csv('iris.csv')
df1= df.loc[df['species'] =='versicolor']
versicolor_petal_length = df1['petal_length']
# Compute ECDF for versicolor data: x_vers, y_vers
x_vers, y_vers = ecdf(versicolor_petal_length)
# Generate plot
_=plt.plot(x_vers, y_vers, marker='.', linestyle='none')
# Make the margins nice
plt.margins(0.02)
# Label the axes
_ = plt.xlabel('veriscolor petal length')
_= plt.ylabel('ECDF')
# Specify array of percentiles: percentiles
percentiles = np.array([2.5,25,50,75,97.5])
# Compute percentiles: ptiles_vers
ptiles_vers = np.percentile(versicolor_petal_length,percentiles)
# Print the result
print(ptiles_vers)
# Overlay the percentile points as red diamonds on the ECDF curve
# (percentiles are divided by 100 to match the ECDF's 0-1 y scale).
_ = plt.plot(ptiles_vers, percentiles/100, marker='D', color='red',
         linestyle='none')
# Display the plot
plt.show()
| [
"das.monisankar@gmail.com"
] | das.monisankar@gmail.com |
a88f2074bcffc41af125c87593f07202ed0c0cfc | a1c9c55e1520356113a320be18e8fcb31654a944 | /archive/0.9/generated/seaborn-scatterplot-11.py | 7bdecc05f79286cfec7618fec2371fbbf86689a0 | [] | no_license | seaborn/seaborn.github.io | bac12a9255b41c7971e9e94ea393d372ef66ef62 | f70445bc3456f0216169806c2daf03452ca1eba4 | refs/heads/master | 2023-01-06T10:50:10.789810 | 2022-12-30T19:59:55 | 2022-12-30T19:59:55 | 70,731,605 | 16 | 5 | null | 2022-06-28T00:32:07 | 2016-10-12T18:56:12 | HTML | UTF-8 | Python | false | false | 170 | py | markers = {"Lunch": "s", "Dinner": "X"}
ax = sns.scatterplot(x="total_bill", y="tip", style="time",
markers=markers,
data=tips)
| [
"mwaskom@nyu.edu"
] | mwaskom@nyu.edu |
e0471aadbd2d2558d2a7e7a2b9b57fc8388cda46 | f6a24e51b6012b582d76db0b2e1e27950729b7bb | /setup.py | acd29caffd3fcd37f13c6290681e0fec2b0f9b4c | [
"LicenseRef-scancode-cecill-b-en"
] | permissive | mathieubonnet/capsul | 391733a2391c1191b643e6847b5f757cf77c1255 | c9745e339c24fc6a27d0adcc1e0c91b355588cac | refs/heads/master | 2020-04-09T02:54:29.257904 | 2015-03-04T14:36:08 | 2015-03-04T14:36:08 | 31,950,724 | 0 | 0 | null | 2015-03-10T10:11:37 | 2015-03-10T10:11:37 | null | UTF-8 | Python | false | false | 2,326 | py | #! /usr/bin/env python
##########################################################################
# CAPSUL - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
from ez_setup import use_setuptools
use_setuptools()
import os
from setuptools import find_packages, setup
import argparse
import sys
# Select which package is created: core or gui
# (parse only --gui ourselves; everything else is handed back to setuptools)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--gui", help="Create the gui package.",
                    action="store_true")
options, unknown = parser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown
# Select appropriate modules: split discovered packages into core vs gui sets.
modules = find_packages()
core_modules = []
gui_modules = ["capsul"]
for module in modules:
    # Work-in-progress modules ship with neither package.
    if module.startswith("capsul.wip"):
        continue
    if module.startswith(("capsul.qt_apps", "capsul.qt_gui")):
        gui_modules.append(module)
    else:
        core_modules.append(module)
# Set selected package options
if options.gui:
    import capsul
    name_suffix = "gui"
    modules = gui_modules
    scripts = ["capsul/qt_apps/capsulview"]
    pkgdata = {"capsul.qt_apps.resources": ["*.ui", "*.png", "*.qrc", "*.txt"]}
    release_info = {}
    # NOTE: execfile is a Python 2 builtin; this setup script targets Python 2.
    execfile(os.path.join(os.path.dirname(capsul.__file__), "info.py"),
             release_info)
else:
    name_suffix = "core"
    modules = core_modules
    scripts = []
    pkgdata = {}
    release_info = {}
    execfile(os.path.join("capsul", "info.py"), release_info)
# Build the setup
setup(
    name="{0}-{1}".format(release_info["NAME"], name_suffix),
    description=release_info["DESCRIPTION"],
    long_description=release_info["LONG_DESCRIPTION"],
    license=release_info["LICENSE"],
    classifiers=release_info["CLASSIFIERS"],
    author=release_info["AUTHOR"],
    author_email=release_info["AUTHOR_EMAIL"],
    version=release_info["VERSION"],
    url=release_info["URL"],
    packages=modules,
    package_data=pkgdata,
    platforms=release_info["PLATFORMS"],
    extras_require=release_info["EXTRA_REQUIRES"],
    install_requires=release_info["REQUIRES"],
    scripts=scripts
)
| [
"antoine.grigis@cea.fr"
] | antoine.grigis@cea.fr |
ea48f51e2344745afe21a09d81054b7b5ad65438 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/create_product_request.py | a8b36ec3e8dd9957b6f1471b71a6f6b4d646caf7 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,922 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateProductRequest:
    """Request model for the CreateProduct API (generated SDK code).
    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'instance_id': 'str',
        'body': 'CreateProductRequestBody'
    }
    attribute_map = {
        'instance_id': 'instance_id',
        'body': 'body'
    }
    def __init__(self, instance_id=None, body=None):
        """CreateProductRequest

        The model defined in huaweicloud sdk

        :param instance_id: Instance ID
        :type instance_id: str
        :param body: Body of the CreateProductRequest
        :type body: :class:`huaweicloudsdkroma.v2.CreateProductRequestBody`
        """
        self._instance_id = None
        self._body = None
        self.discriminator = None
        self.instance_id = instance_id
        if body is not None:
            self.body = body
    @property
    def instance_id(self):
        """Gets the instance_id of this CreateProductRequest.

        Instance ID

        :return: The instance_id of this CreateProductRequest.
        :rtype: str
        """
        return self._instance_id
    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this CreateProductRequest.

        Instance ID

        :param instance_id: The instance_id of this CreateProductRequest.
        :type instance_id: str
        """
        self._instance_id = instance_id
    @property
    def body(self):
        """Gets the body of this CreateProductRequest.

        :return: The body of this CreateProductRequest.
        :rtype: :class:`huaweicloudsdkroma.v2.CreateProductRequestBody`
        """
        return self._body
    @body.setter
    def body(self, body):
        """Sets the body of this CreateProductRequest.

        :param body: The body of this CreateProductRequest.
        :type body: :class:`huaweicloudsdkroma.v2.CreateProductRequestBody`
        """
        self._body = body
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing raw values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force utf-8 default encoding before serializing.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateProductRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
5e180554a5c3e85632e5b64aed8b28f0e3a7121f | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /cv/pose/alphapose/pytorch/trackers/utils/transform.py | 1017f4807614151a663a8f84628b8d568304a987 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 5,633 | py | # encoding: utf-8
import torchvision.transforms as T
import math
import random
import torch
import cv2
import numpy as np
#from .functional import to_tensor
#from .transforms import *
class RandomErasing(object):
    """ Randomly selects a rectangle region in an image and erases its pixels.
        'Random Erasing Data Augmentation' by Zhong et al.
        See https://arxiv.org/pdf/1708.04896.pdf
    Args:
         probability: The probability that the Random Erasing operation will be performed.
         sl: Minimum proportion of erased area against input image.
         sh: Maximum proportion of erased area against input image.
         r1: Minimum aspect ratio of erased area.
         mean: Per-channel fill values (0-255 pixel scale) written into the
             erased rectangle.
    """
    def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3,
                 mean=(255 * 0.49735, 255 * 0.4822, 255 * 0.4465)):
        # BUGFIX: the previous default, `255 * (0.49735, 0.4822, 0.4465)`,
        # replicated the tuple 765 times (tuple * int) instead of scaling it,
        # so erased pixels were filled with ~0.5 rather than pixel-scale ~127.
        self.probability = probability
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1
    def __call__(self, img):
        """Erase one random rectangle of ``img`` with probability ``probability``.

        :param img: image convertible to a float32 H x W x C ndarray
        :return: a float32 ndarray copy, possibly with one rectangle erased
        """
        img = np.asarray(img, dtype=np.float32).copy()
        if random.uniform(0, 1) > self.probability:
            return img
        # Try up to 100 random rectangles until one fits inside the image;
        # if none fits, return the image unmodified.
        for attempt in range(100):
            area = img.shape[0] * img.shape[1]
            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1 / self.r1)
            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))
            if w < img.shape[1] and h < img.shape[0]:
                x1 = random.randint(0, img.shape[0] - h)
                y1 = random.randint(0, img.shape[1] - w)
                if img.shape[2] == 3:
                    img[x1:x1 + h, y1:y1 + w, 0] = self.mean[0]
                    img[x1:x1 + h, y1:y1 + w, 1] = self.mean[1]
                    img[x1:x1 + h, y1:y1 + w, 2] = self.mean[2]
                else:
                    img[x1:x1 + h, y1:y1 + w, 0] = self.mean[0]
                return img
        return img
def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    See ``ToTensor`` for more details.
    NOTE: unlike torchvision's ``to_tensor``, byte inputs are only cast to
    float here -- values are NOT divided by 255, so they keep their
    original 0-255 range.
    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
    Returns:
        Tensor: Converted image, shape (C x H x W).
    """
    if isinstance(pic, np.ndarray):
        assert len(pic.shape) in (2, 3)
        # handle numpy array
        if pic.ndim == 2:
            # Promote H x W to H x W x 1 so the transpose below is uniform.
            pic = pic[:, :, None]
        img = torch.from_numpy(pic.transpose((2, 0, 1)))
        # backward compatibility: byte data is cast to float (no /255 scaling)
        if isinstance(img, torch.ByteTensor):
            return img.float()
        else:
            return img
    # handle PIL Image (duck-typed via .mode/.size/.tobytes; PIL not imported)
    if pic.mode == 'I':
        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
    elif pic.mode == 'F':
        img = torch.from_numpy(np.array(pic, np.float32, copy=False))
    elif pic.mode == '1':
        img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
    else:
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
    # PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK
    if pic.mode == 'YCbCr':
        nchannel = 3
    elif pic.mode == 'I;16':
        nchannel = 1
    else:
        nchannel = len(pic.mode)
    img = img.view(pic.size[1], pic.size[0], nchannel)
    # put it from HWC to CHW format
    # yikes, this transpose takes 80% of the loading time/CPU
    img = img.transpose(0, 1).transpose(0, 2).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.float()
    else:
        return img
class ToTensor(object):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    Converts a PIL Image or numpy.ndarray (H x W x C) to a
    torch.FloatTensor of shape (C x H x W).
    NOTE: despite the torchvision-style name, this wrapper's ``to_tensor``
    only casts byte data to float -- values are NOT rescaled to [0.0, 1.0].
    """
    def __call__(self, pic):
        """
        Args:
            pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
        Returns:
            Tensor: Converted image.
        """
        return to_tensor(pic)
    def __repr__(self):
        return self.__class__.__name__ + '()'
def build_transforms(cfg, is_train=True):
    """Build the image transform pipeline for re-ID crops.

    :param cfg: dict of options. Training keys: SIZE_TRAIN, DO_FLIP,
        FLIP_PROB, DO_PAD, PADDING, PADDING_MODE, RE_ENABLED.
        Evaluation key: TEST_SIZE.
    :param is_train: build the augmented training pipeline when True,
        otherwise a plain resize for evaluation.
    :return: a torchvision ``Compose`` ending with the local ``ToTensor``.
    """
    res = []
    res.append(T.ToPILImage(mode=None))
    if is_train:
        size_train = cfg["SIZE_TRAIN"]
        # flip left-right (horizontal flip augmentation)
        do_flip = cfg["DO_FLIP"]
        flip_prob = cfg["FLIP_PROB"]
        # padding
        do_pad = cfg["DO_PAD"]
        padding = cfg["PADDING"]
        padding_mode = cfg["PADDING_MODE"]
        # random erasing
        do_re = cfg["RE_ENABLED"]
        #re_prob = cfg["RE_PROB"]
        #re_mean = cfg["RE_MEAN"]
        # interpolation=3 is PIL.Image.BICUBIC
        res.append(T.Resize(size_train, interpolation=3))
        if do_flip:
            res.append(T.RandomHorizontalFlip(p=flip_prob))
        if do_pad:
            res.extend([T.Pad(padding, padding_mode=padding_mode),
                        T.RandomCrop(size_train)])
        if do_re:
            #res.append(T.RandomErasing(probability=re_prob, mean=re_mean))
            res.append(RandomErasing())
        # if cfg.INPUT.CUTOUT.DO:
        #     res.append(Cutout(probability=cfg.INPUT.CUTOUT.PROB, size=cfg.INPUT.CUTOUT.SIZE,
        #                       mean=cfg.INPUT.CUTOUT.MEAN))
    else:
        size_test = cfg["TEST_SIZE"]
        res.append(T.Resize(size_test, interpolation=3))
    res.append(ToTensor())
    return T.Compose(res)
| [
"mingjiang.li@iluvatar.ai"
] | mingjiang.li@iluvatar.ai |
7d9e4da203e7b0fe44f41edd27aba9153d1d6ac4 | 3cf0d750948a758d5771dd778fbb783d64a044ae | /src/pads/pads/smawk.py | 948ab4c241686f0469e09ee2a3ff4dddbd1bbb79 | [
"MIT",
"CC-BY-NC-SA-4.0",
"Apache-2.0"
] | permissive | hbulpf/pydemo | 6552a08b3c85721ac1b2ba335b030e234ad03b6c | ea3e9f9086116a86ecef803e9e3179a34c94c20f | refs/heads/master | 2022-11-30T21:06:29.933820 | 2022-01-15T17:05:16 | 2022-01-15T17:05:16 | 237,584,300 | 6 | 1 | Apache-2.0 | 2022-11-22T09:49:38 | 2020-02-01T08:20:43 | Python | UTF-8 | Python | false | false | 7,823 | py | """SMAWK.py
Totally monotone matrix searching algorithms.
The offline algorithm in ConcaveMinima is from Agarwal, Klawe, Moran,
Shor, and Wilbur, Geometric applications of a matrix searching algorithm,
Algorithmica 2, pp. 195-208 (1987).
The online algorithm in OnlineConcaveMinima is from Galil and Park,
A linear time algorithm for concave one-dimensional dynamic programming,
manuscript, 1989, which simplifies earlier work on the same problem
by Wilbur (J. Algorithms 1988) and Eppstein (J. Algorithms 1990).
D. Eppstein, March 2002, significantly revised August 2005
"""
def ConcaveMinima(RowIndices, ColIndices, Matrix):
    """
    Search for the minimum value in each column of a matrix.
    The return value is a dictionary mapping ColIndices to pairs
    (value,rowindex). We break ties in favor of earlier rows.
    The matrix is defined implicitly as a function, passed
    as the third argument to this routine, where Matrix(i,j)
    gives the matrix value at row index i and column index j.
    The matrix must be concave, that is, satisfy the property
        Matrix(i,j) > Matrix(i',j) => Matrix(i,j') > Matrix(i',j')
    for every i<i' and j<j'; that is, in every submatrix of
    the input matrix, the positions of the column minima
    must be monotonically nondecreasing.
    The rows and columns of the matrix are labeled by the indices
    given in order by the first two arguments. In most applications,
    these arguments can simply be integer ranges.
    """
    # Base case of recursion
    if not ColIndices:
        return {}
    # Reduce phase: make number of rows at most equal to number of cols.
    # A stacked row is popped once it is dominated (cannot hold any
    # column minimum given the rows pushed after it).
    stack = []
    for r in RowIndices:
        while len(stack) >= 1 and \
                Matrix(stack[-1], ColIndices[len(stack) - 1]) \
                > Matrix(r, ColIndices[len(stack) - 1]):
            stack.pop()
        if len(stack) != len(ColIndices):
            stack.append(r)
    RowIndices = stack
    # Recursive call to search for every odd column
    minima = ConcaveMinima(RowIndices,
                           [ColIndices[i]
                            for i in range(1, len(ColIndices), 2)],
                           Matrix)
    # Interpolation phase: fill in the even-indexed columns. By total
    # monotonicity, each one's minimum row lies between the minima rows
    # of its neighboring odd columns, so the scan over r never restarts.
    r = 0
    for c in range(0, len(ColIndices), 2):
        col = ColIndices[c]
        row = RowIndices[r]
        if c == len(ColIndices) - 1:
            lastrow = RowIndices[-1]
        else:
            lastrow = minima[ColIndices[c + 1]][1]
        pair = (Matrix(row, col), row)
        while row != lastrow:
            r += 1
            row = RowIndices[r]
            pair = min(pair, (Matrix(row, col), row))
        minima[col] = pair
    return minima
class OnlineConcaveMinima:
    """
    Online concave minimization algorithm of Galil and Park.
    OnlineConcaveMinima(Matrix,initial) creates a sequence of pairs
    (self.value(j),self.index(j)), where
        self.value(0) = initial,
        self.value(j) = min { Matrix(i,j) | i < j } for j > 0,
    and where self.index(j) is the value of i that provides the minimum.
    Matrix(i,j) must be concave, in the same sense as for ConcaveMinima.
    We never call Matrix(i,j) until value(i) has already been computed,
    so that the Matrix function may examine previously computed values.
    Calling value(i) for an i that has not yet been computed forces
    the sequence to be continued until the desired index is reached.
    Calling iter(self) produces a sequence of (value,index) pairs.
    Matrix(i,j) should always return a value, rather than raising an
    exception, even for j larger than the range we expect to compute.
    If j is out of range, a suitable value to return that will not
    violate concavity is Matrix(i,j) = -i.  It will not work correctly
    to return a flag value such as None for large j, because the ties
    formed by the equalities among such flags may violate concavity.
    """
    def __init__(self, Matrix, initial):
        """Initialize a OnlineConcaveMinima object."""
        # State used by self.value(), self.index(), and iter(self)
        self._values = [initial]    # tentative solution values...
        self._indices = [None]      # ...and their indices
        self._finished = 0          # index of last non-tentative value
        # State used by the internal algorithm
        #
        # We allow self._values to be nonempty for indices > finished,
        # keeping invariant that
        # (1) self._values[i] = Matrix(self._indices[i], i),
        # (2) if the eventual correct value of self.index(i) < base,
        #     then self._values[i] is nonempty and correct.
        #
        # In addition, we keep a column index self._tentative, such that
        # (3) if i <= tentative, and the eventual correct value of
        #     self.index(i) <= finished, then self._values[i] is correct.
        #
        self._matrix = Matrix
        self._base = 0
        self._tentative = 0
    def __iter__(self):
        """Loop through (value,index) pairs."""
        i = 0
        while True:
            yield self.value(i), self.index(i)
            i += 1
    def value(self, j):
        """Return min { Matrix(i,j) | i < j }, computing it if necessary."""
        while self._finished < j:
            self._advance()
        return self._values[j]
    def index(self, j):
        """Return argmin { Matrix(i,j) | i < j }, computing it if necessary."""
        while self._finished < j:
            self._advance()
        return self._indices[j]
    def _advance(self):
        """Finish another value,index pair (one of four amortized cases)."""
        # First case: we have already advanced past the previous tentative
        # value.  We make a new tentative value by applying ConcaveMinima
        # to the largest square submatrix that fits under the base.
        i = self._finished + 1
        if i > self._tentative:
            rows = range(self._base, self._finished + 1)
            self._tentative = self._finished + len(rows)
            cols = range(self._finished + 1, self._tentative + 1)
            minima = ConcaveMinima(rows, cols, self._matrix)
            for col in cols:
                if col >= len(self._values):
                    self._values.append(minima[col][0])
                    self._indices.append(minima[col][1])
                elif minima[col][0] < self._values[col]:
                    self._values[col], self._indices[col] = minima[col]
            self._finished = i
            return
        # Second case: the new column minimum is on the diagonal.
        # All subsequent ones will be at least as low,
        # so we can clear out all our work from higher rows.
        # As in the fourth case, the loss of tentative is
        # amortized against the increase in base.
        diag = self._matrix(i - 1, i)
        if diag < self._values[i]:
            self._values[i] = diag
            self._indices[i] = self._base = i - 1
            self._tentative = self._finished = i
            return
        # Third case: row i-1 does not supply a column minimum in
        # any column up to tentative. We simply advance finished
        # while maintaining the invariant.
        prev_row = self._matrix(i - 1, self._tentative)
        tentative_value = self._values[self._tentative]
        if prev_row >= tentative_value:
            self._finished = i
            return
        # Fourth and final case: a new column minimum at self._tentative.
        # This allows us to make progress by incorporating rows
        # prior to finished into the base.  The base invariant holds
        # because these rows cannot supply any later column minima.
        # The work done when we last advanced tentative (and undone by
        # this step) can be amortized against the increase in base.
        self._base = i - 1
        self._tentative = self._finished = i
        return
"hudalpf@163.com"
] | hudalpf@163.com |
2b9fce0d1039592d118c8db220b31dfeda6b619d | 161fd6370ffa0b35ecd50719d6266224da597ee0 | /Python/Django/ninjaGold/apps/ninjagold/urls.py | 402d56e647982cf8b69ddf3579a56486e220d103 | [] | no_license | ebergstein/DojoAssignments | a30fd8b36442bff2a4253902a591ad11f191fc12 | 3ad9ac65073c733ead32b93ce4be19af5369fccf | refs/heads/master | 2021-06-19T09:48:23.100713 | 2017-06-30T04:24:35 | 2017-06-30T04:24:35 | 82,743,546 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^farm$', views.farm),
url(r'^cave$', views.cave),
url(r'^house$', views.house),
url(r'^casino$', views.casino)
] | [
"ebergstein@sbcglobal.net"
] | ebergstein@sbcglobal.net |
ce7f5d0af87d499a9e90d621f57bd18256c57e02 | b998f07d5be9a339ee9d93f4143209246fc0613e | /docs/histogram/blur1.py | 1a09122fad5db92539484274aed6f898a5adf0d4 | [] | no_license | s-cosseddu/opencv-tutorial | a48c48c08efad2746dc1ff8ca1d9ecd1ef9e80b2 | be6c1da81d1cfaf1b47f1873adf0fdb50a7ab84c | refs/heads/master | 2023-02-07T03:08:29.968456 | 2020-02-09T20:05:02 | 2020-02-09T20:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # Blurring
import cv2 as cv
def trackbar(x):
x = cv.getTrackbarPos('blur x','window')
y = cv.getTrackbarPos('blur x','window')
blurred = cv.blur(img, (x, y))
cv.imshow('window', blurred)
cv.displayOverlay('window', f'blur = ({x}, {y})')
img = cv.imread('lego.png')
cv.imshow('window', img)
cv.createTrackbar('blur x', 'window', 0, 4, trackbar)
cv.createTrackbar('blur y', 'window', 0, 4, trackbar)
cv.waitKey(0)
cv.destroyAllWindows() | [
"raphael.holzer@citycable.ch"
] | raphael.holzer@citycable.ch |
0e1ddf5c4cfad22d936a9c52c81dc25d7c5a267e | ce083b3b29f8218854c5b5171949f257558edfd3 | /raterapi/views/game.py | 7a7a9dd7c41fbd4c9e2f5f4c7dea59ba45095254 | [] | no_license | MarkyAaronYoung/raterproject | 195082d63bfb063b34b65a27850fd5211b47e5cc | 0c6b675f09a786d88f341641fab513c998699ad7 | refs/heads/main | 2023-02-02T20:24:24.581964 | 2020-12-05T18:08:25 | 2020-12-05T18:08:25 | 313,803,061 | 0 | 0 | null | 2020-12-05T18:08:26 | 2020-11-18T02:38:51 | Python | UTF-8 | Python | false | false | 2,275 | py | from django.core.exceptions import ValidationError
from rest_framework import status
from django.http import HttpResponseServerError
from rest_framework.viewsets import import ViewSet
from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status
from raterapi.models import Game, Player, Category, Review
class GamesViewSet(ViewSet):
def list(self, request):
games = Game.objects.all()
serializer = GameSerializer(games, many=True, context={'request': request})
return Response(serializer.data)
def create(self, request):
game = Game()
game.title = request.data["title"]
game.number_of_players = request.data["numberOfPlayers"]
game.year_released = request.data["yearReleased"]
game.age_rec = request.data["ageRec"]
game.play_time = request.data["playTime"]
game.game_pic = request.data["gamePic"]
game.rating = request.data["rating"]
game.designer = request.data["designer"]
category = Category.objects.get(pk=request.data["categoryId"])
game.category = category
try:
game.save()
serializer = GameSerializer(game, context={'request': request})
return Response(serializer.data)
except ValidationError as ex:
return Response({"reason": ex.message}, status=status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk=None):
"""Handle GET requests for single game
Returns:
Response -- JSON serialized game instance
"""
try:
game = Game.objects.get(pk=pk)
serializer = GameSerializer(game, context={'request': request})
return Response(serializer.data)
except Exception as ex:
return HttpResponseServerError(ex)
class GameSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Game
url = serializers.HyperlinkedIdentityField(
view_name='game',
lookup_field='id'
)
fields = ('id', 'url', 'title', 'designer', 'description', 'year_released', 'number_of_players', 'play_time', 'age_rec', 'category', 'game_pic', 'rating')
depth = 1
| [
"markyaaronyoung@gmail.com"
] | markyaaronyoung@gmail.com |
8fe3baf1366251c0f42785474df76f23f3704ed1 | da96d29b457eb123c01274efea562448df105fc6 | /chapter6/st8.py | 527f6d571dd909e8aa75709b917a4ccdabec9642 | [] | no_license | Alonsovau/sketches | a1336f1a7909ad059744c4613ab992c8361264f5 | dfb072086cc813d7409fa11393ebaad6e26db180 | refs/heads/master | 2021-01-19T22:29:15.827896 | 2017-10-19T15:37:28 | 2017-10-19T15:37:28 | 88,761,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # 与关系型数据库的交互
import sqlite3
stocks = [
('GOOG', 100, 490.1),
('AAPL', 50, 545.75),
('FB', 150, 7.45),
('HPQ', 76, 33.2)
]
db = sqlite3.connect('database.db')
c = db.cursor()
# print(c.execute('create table portfolio (symbol text, shares integer, price real)'))
# db.commit()
# c.executemany('insert into portfolio values(?,?,?)', stocks)
# db.commit()
for row in db.execute('select * from portfolio'):
print(row)
print('-----------------')
min_price = 100
for row in db.execute('select * from portfolio where price >= ?', (min_price,)):
print(row)
| [
"alonsovau@outlook.com"
] | alonsovau@outlook.com |
af0ea6669de535070a72eb729a27acc46e30001c | 12f006a0e5d75ef2349d4ae519c1c9cac5309761 | /Solution_30.py | aba2dee1902b3a33bd33369a0a08ad7a470b376b | [] | no_license | TimothySjiang/leetcodepy | c613db16282eade713e01b7d641c0f5b341ec84b | ef64e46b8833a684b8b0355ce576b767a0e03596 | refs/heads/master | 2020-07-01T14:48:35.953841 | 2020-01-12T06:19:44 | 2020-01-12T06:19:44 | 201,199,810 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | class Solution:
def findSubstring(self, s: str, words: List[str]) -> List[int]:
if not words: return []
dic = collections.Counter(words)
wlength = len(words[0])
res = []
for i in range(wlength):
window = collections.Counter()
count = 0
for j in range(i, len(s), wlength):
word = s[j:j + wlength]
if word in dic:
window[word] += 1
count += 1
while window[word] > dic[word]:
pos = j - wlength * (count - 1)
rword = s[pos:pos + wlength]
window[rword] -= 1
count -= 1
else:
window = collections.Counter()
count = 0
if count == len(words):
res.append(j - wlength * (count - 1))
return res | [
"shjiang@ucdavis.edu"
] | shjiang@ucdavis.edu |
8061a30617a92741c6620ee3fc796b7d0247231e | 180a3795a115c0da71078f81efbde45ab2025ca0 | /interview/头条/old/b.py | c64fd56725c2a7169db3defd92ff17ef9da526c9 | [] | no_license | lizhe960118/Machine-Learning | a7593e6788433408bcf072e5e25672debd931ee4 | 2d6fe2373839964645d632895ed2a7dcb9de48b0 | refs/heads/master | 2020-03-31T15:53:57.408037 | 2019-08-18T12:29:11 | 2019-08-18T12:29:11 | 152,355,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | N = int(input())
delay_time = [[int(a) for a in input().split()] for _ in range(N)]
a, b, k = [int(a) for a in input().split()]
def flayAlgoritm(graph):
minDistance = [[0 for _ in range(len(graph[0]))] for _ in range(len(graph))]
N = len(graph)
for i in range(N):
for j in range(N):
minDistance[i][j] = graph[i][j]
for k in range(N):
for i in range(N):
for j in range(N):
if minDistance[i][j] > minDistance[i][k] + minDistance[k][j]:
minDistance[i][j] = minDistance[i][k] + minDistance[k][j]
return minDistance
minDistance = flayAlgoritm(delay_time)
temp = [delay_time[i][j] for i in range(len(delay_time)) for j in range(len(delay_time[0])) if i > j]
min_delay = min(temp)
t = k - min_delay
if t <= 0:
print(-1)
else:
for i in range(t):
print(i) | [
"2957308424@qq.com"
] | 2957308424@qq.com |
6f35d96d98a3368b68951d18321d0ae5ca68ebb6 | 68cecfdf90585d8fe7a705c10521d2e2cec80b8a | /apps/courses/migrations/0005_auto_20180814_1824.py | d643a4ae760016f9b51d0ae226bd67a23268d94c | [] | no_license | balloontmz/mooc | e3b8759a76879f321c55c98c8e07b1200cd18c9a | 4f01f82445f4b5e85a700793828eb5f969875814 | refs/heads/master | 2020-03-25T11:31:21.953098 | 2018-08-20T05:21:25 | 2018-08-20T05:21:25 | 143,736,149 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | # Generated by Django 2.0.1 on 2018-08-14 18:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organization', '0006_auto_20180812_1555'),
('courses', '0004_auto_20180813_2135'),
]
operations = [
migrations.AddField(
model_name='course',
name='teacher',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organization.Teacher', verbose_name='讲师'),
),
migrations.AddField(
model_name='course',
name='teacher_tell',
field=models.CharField(default='什么都可以学到,按时交作业,不然叫家长', max_length=300, verbose_name='=老师告诉你'),
),
migrations.AddField(
model_name='course',
name='you_need_know',
field=models.CharField(default='一颗勤学的心是本课程的必要前提', max_length=300, verbose_name='=课程须知'),
),
migrations.AddField(
model_name='video',
name='learn_times',
field=models.IntegerField(default=0, verbose_name='学习时长(分钟数)'),
),
migrations.AddField(
model_name='video',
name='url',
field=models.CharField(default='http://blog.mtianyan.cn/', max_length=200, verbose_name='视频地址'),
),
]
| [
"15111171986@163.com"
] | 15111171986@163.com |
544cfca42ef60962f6e782c20d5e90e3cc8a535c | 97f9e29696000f45330fcad4c6a8d26bb8231708 | /good_point.py | 88962fb5d7084ab23b692471fdcd8e1f33284ae5 | [] | no_license | Ceasar/lecture | 5c5419404b08c8cb8f5b37e069db40e9146059b9 | d1143a0405d9dd2432d5c0cf14cf3ac2f9c18441 | refs/heads/master | 2021-01-20T12:20:45.793808 | 2012-02-28T04:08:46 | 2012-02-28T04:08:46 | 3,562,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | from math import sin, cos, pi, atan2
class Point(object):
def __init__(self, r, theta):
self.r = r
self.theta = theta
@property
def x(self):
return round(self.r * cos(self.theta))
@x.setter
def x(self, val):
self.r = round(((val * val) + (self.y * self.y)) ** 0.5)
self.theta = round(atan2(self.y, val))
@property
def y(self):
return round(self.r * sin(self.theta))
def rotate(self, theta):
self.theta += theta
def __str__(self):
return "x = %s; y = %s; r = %s; theta = %s;" % (self.x, self.y, self.r, self.theta)
if __name__ == "__main__":
p = Point(1, pi / 2)
print p
p.rotate(pi / 2)
print p
# so far so good
p.x = 10
print p
# right!
# now try setting y...
| [
"cbautista2010@gmail.com"
] | cbautista2010@gmail.com |
9baff6f38d64c4d58a9e972830a5bb3cefa44344 | e4e79bb3bc69c89fbc0429df37ef26fef6a49592 | /testproject/testproject/urls.py | 1b2a05bb96c78a88199932a368c18ecf199109ea | [
"Apache-2.0"
] | permissive | jluttine/django-nyt | ee78a4f55fb7109a5e9dca40f3a69cc58ac6a1b6 | 660f9c387cc1c363ab26e3ab2812da098d086876 | refs/heads/master | 2020-12-28T21:17:00.547751 | 2014-10-15T10:33:33 | 2014-10-15T10:33:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
)
from django_nyt.urls import get_pattern as get_nyt_pattern
urlpatterns += patterns('',
(r'^nyt/', get_nyt_pattern()),
)
| [
"benjaoming@gmail.com"
] | benjaoming@gmail.com |
9efc7e03791547d91a33989077adbe2056566a48 | 1afec7d1d3099138b5afe5fd73dfd3d24ff4eb15 | /test/functional/rpc_invalid_address_message.py | f9149e01f98e473b42a9825372cdf9eb1bccdde4 | [
"MIT"
] | permissive | republic-productions/finalcoin | 5c7c6b0734178fe22db63f0946ec555f59e8d0eb | 7c0f335ded1e5c662034c822ca2c474b8e62778f | refs/heads/main | 2023-09-04T17:04:32.683667 | 2021-10-14T17:45:22 | 2021-10-14T17:45:22 | 417,209,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,867 | py | #!/usr/bin/env python3
# Copyright (c) 2020 The Finalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test error messages for 'getaddressinfo' and 'validateaddress' RPC commands."""
from test_framework.test_framework import FinalcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
BECH32_VALID = 'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv'
BECH32_INVALID_BECH32 = 'bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqdmchcc'
BECH32_INVALID_BECH32M = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7k35mrzd'
BECH32_INVALID_VERSION = 'bcrt130xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqynjegk'
BECH32_INVALID_SIZE = 'bcrt1s0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7v8n0nx0muaewav25430mtr'
BECH32_INVALID_V0_SIZE = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kqqq5k3my'
BECH32_INVALID_PREFIX = 'bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx'
BASE58_VALID = 'mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn'
BASE58_INVALID_PREFIX = '17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem'
INVALID_ADDRESS = 'asfah14i8fajz0123f'
class InvalidAddressErrorMessageTest(FinalcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def test_validateaddress(self):
node = self.nodes[0]
# Bech32
info = node.validateaddress(BECH32_INVALID_SIZE)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid Bech32 address data size')
info = node.validateaddress(BECH32_INVALID_PREFIX)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid prefix for Bech32 address')
info = node.validateaddress(BECH32_INVALID_BECH32)
assert not info['isvalid']
assert_equal(info['error'], 'Version 1+ witness address must use Bech32m checksum')
info = node.validateaddress(BECH32_INVALID_BECH32M)
assert not info['isvalid']
assert_equal(info['error'], 'Version 0 witness address must use Bech32 checksum')
info = node.validateaddress(BECH32_INVALID_V0_SIZE)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid Bech32 v0 address data size')
info = node.validateaddress(BECH32_VALID)
assert info['isvalid']
assert 'error' not in info
info = node.validateaddress(BECH32_INVALID_VERSION)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid Bech32 address witness version')
# Base58
info = node.validateaddress(BASE58_INVALID_PREFIX)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid prefix for Base58-encoded address')
info = node.validateaddress(BASE58_VALID)
assert info['isvalid']
assert 'error' not in info
# Invalid address format
info = node.validateaddress(INVALID_ADDRESS)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid address format')
def test_getaddressinfo(self):
node = self.nodes[0]
assert_raises_rpc_error(-5, "Invalid Bech32 address data size", node.getaddressinfo, BECH32_INVALID_SIZE)
assert_raises_rpc_error(-5, "Invalid prefix for Bech32 address", node.getaddressinfo, BECH32_INVALID_PREFIX)
assert_raises_rpc_error(-5, "Invalid prefix for Base58-encoded address", node.getaddressinfo, BASE58_INVALID_PREFIX)
assert_raises_rpc_error(-5, "Invalid address format", node.getaddressinfo, INVALID_ADDRESS)
def run_test(self):
self.test_validateaddress()
if self.is_wallet_compiled():
self.init_wallet(0)
self.test_getaddressinfo()
if __name__ == '__main__':
InvalidAddressErrorMessageTest().main()
| [
"republicproductions@protonmail.com"
] | republicproductions@protonmail.com |
013ab9d306c6cde353ef76978b48a2f6e11b8d30 | 1d892928c70ee9ddf66f2a37a8e083d2632c6e38 | /nova/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py | 971bfbecc2e217ffdbb4d29f36abed2a9970e642 | [
"Apache-2.0"
] | permissive | usc-isi/essex-baremetal-support | 74196c3f1332ee3cdeba9c263faff0ac0567d3cf | a77daf8ef56cf41e38de36621eda25ed3f180156 | refs/heads/master | 2021-05-19T03:12:11.929550 | 2020-07-24T14:15:26 | 2020-07-24T14:15:26 | 4,702,421 | 0 | 1 | Apache-2.0 | 2020-07-24T14:15:27 | 2012-06-18T15:19:41 | null | UTF-8 | Python | false | false | 3,239 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
from sqlalchemy import MetaData, Table
from migrate.changeset.constraint import UniqueConstraint
def _get_constraint_names(engine_name):
# NOTE(vish): These constraint names may be dependent on the backend, but
# there doesn't seem to be we a way to determine the proper
# name for existing constraints. These names are correct for
# mysql and postgres.
if engine_name == "mysql":
return {
"instance_types_name": ("name", "instance_types_name_key"),
"instance_types_flavorid": "instance_types_flavorid_str_key",
"volume_types_name": "name",
}
else:
return {
"instance_types_name": ("instance_types_name_key",),
"instance_types_flavorid": "instance_types_flavorid_str_key",
"volume_types_name": "volume_types_name_key",
}
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
c_names = _get_constraint_names(migrate_engine.name)
table = Table('instance_types', meta, autoload=True)
for constraint_name in c_names['instance_types_name']:
cons = UniqueConstraint('name',
name=constraint_name,
table=table)
cons.drop()
cons = UniqueConstraint('flavorid',
name=c_names['instance_types_flavorid'],
table=table)
cons.drop()
table = Table('volume_types', meta, autoload=True)
cons = UniqueConstraint('name',
name=c_names['volume_types_name'],
table=table)
cons.drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
c_names = _get_constraint_names(migrate_engine.name)
table = Table('instance_types', meta, autoload=True)
for constraint_name in c_names['instance_types_name']:
cons = UniqueConstraint('name',
name=constraint_name,
table=table)
cons.create()
table = Table('instance_types', meta, autoload=True)
cons = UniqueConstraint('flavorid',
name=c_names['instance_types_flavorid'],
table=table)
cons.create()
table = Table('volume_types', meta, autoload=True)
cons = UniqueConstraint('name',
name=c_names['volume_types_name'],
table=table)
cons.create()
| [
"dkang@isi.edu"
] | dkang@isi.edu |
0b294d1d60dd4bdf4271c352d5336b20def7191a | 6b2e5e0d21601c61a84afb7164125fb1dc16c7aa | /docs/fonts.py | 09b4151ff1b2607cdb65c6d41b344e93565b57f9 | [
"MIT"
] | permissive | KOLANICH-libs/proplot | aa42b7e8bf4df5f425ea19d31fca2afd15d9a11f | a71e4f8fd57410eee96ba4ce701b8290541a3b28 | refs/heads/master | 2023-07-08T16:44:01.227438 | 2021-07-04T20:28:52 | 2021-07-04T20:28:52 | 313,345,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,502 | py | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [raw] raw_mimetype="text/restructuredtext"
#
# Font selection
# ==============
#
# ProPlot registers several new fonts and includes tools for adding
# your own fonts. These features are described below.
#
#
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_fonts:
#
# Included fonts
# --------------
#
# Matplotlib provides a `~matplotlib.font_manager` module for working with
# system fonts and classifies fonts into `five font families\
# <https://matplotlib.org/3.1.1/gallery/text_labels_and_annotations/fonts_demo.html>`__:
# :rcraw:`font.serif` :rcraw:`font.sans-serif`, :rcraw:`font.monospace`,
# :rcraw:`font.cursive`, and :rcraw:`font.fantasy`. The default font family
# is sans-serif, because sans-serif fonts are generally more suitable for
# figures than serif fonts, and the default font name belonging to this family
# is `DejaVu Sans <https://dejavu-fonts.github.io>`__, which comes packaged with
# matplotlib.
#
# Matplotlib uses DejaVu Sans in part because it includes glyphs for a very wide
# range of symbols, especially mathematical symbols. However DejaVu Sans is seldom
# used outside of matplotlib and (in our opinion) is not very aesthetically pleasing.
# To improve the font selection while keeping things consistent across different
# workstations, ProPlot comes packaged with the open-source
# `TeX Gyre font series <https://ctan.org/pkg/tex-gyre?lang=en>`__
# and adds them as the default entries for all of matplotlib's font famlies:
#
# * The `Century <https://en.wikipedia.org/wiki/Century_type_family>`__ lookalike
# :rcraw:`font.serif` = ``'TeX Gyre Schola'``.
# * The `Helvetica <https://en.wikipedia.org/wiki/Helvetica>`__ lookalike
# :rcraw:`font.sans-serif` = ``'TeX Gyre Heros'``.
# * The `Courier <https://en.wikipedia.org/wiki/Courier_(typeface)>`__ lookalike
# :rcraw:`font.monospace` = ``'TeX Gyre Cursor'``.
# * The `Chancery <https://en.wikipedia.org/wiki/ITC_Zapf_Chancery>`__ lookalike
# :rcraw:`font.cursive` = ``'TeX Gyre Chorus'``.
# * The `Avant Garde <https://en.wikipedia.org/wiki/ITC_Avant_Garde>`__ lookalike
# :rcraw:`font.fantasy` = ``'TeX Gyre Adventor'``.
#
# After importing ProPlot, the default matplotlib font will be
# `TeX Gyre Heros <https://ctan.org/pkg/tex-gyre-heros>`__,
# which emulates the more conventional and aesthetically pleasing font
# `Helvetica <https://en.wikipedia.org/wiki/Helvetica>`__. The
# full font priority lists for each family are displayed in the
# :ref:`default proplotrc file <ug_proplotrc>`.
#
# To compare different fonts, use the `~proplot.demos.show_fonts` command. By
# default, this displays the *sans serif* fonts available on your system and
# packaged with ProPlot. The sans serif table on the RTD server is shown
# below. The "¤" symbol appears where characters for a particular font are
# unavailable (when making plots, "¤" is replaced with the character from
# a fallback font). Since most TeX Gyre fonts have limited
# character sets, if your plots contain lots of mathematical symbols,
# you may want to set :rcraw:`font.family` to DejaVu Sans or
# `Fira Math <https://github.com/firamath/firamath>`__, which is packaged
# with ProPlot.
#
# .. note::
#
# Try to avoid ``.ttf`` files with ``Thin`` in the file name. Some versions of
# matplotlib interpret fonts with the "thin" style as having *normal* weight (see
# `this issue page <https://github.com/matplotlib/matplotlib/issues/8788>`__),
# causing them to override the correct normal weight versions. While ProPlot
# tries to filter out these files, this cannot be done systematically. In the
# below example, the "Roboto" font may be overridden by its "thin" version
# because the RTD server includes this style.
# %%
import proplot as plot
fig, axs = plot.show_fonts()
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_fonts_user:
#
# Using your own fonts
# --------------------
#
# You can register your own fonts by adding files to the ``~/.proplot/fonts``
# directory and calling `~proplot.config.register_fonts`. This command is
# also called on import. To change the default font, use the
# `~proplot.config.rc` object or modify your ``~/.proplotrc``. See
# the :ref:`configuration section <ug_config>` for details.
#
# Sometimes the font you would like to use *is* installed, but the font file
# is not stored under the matplotlib-compatible ``.ttf``, ``.otf``, or ``.afm``
# formats. For example, several macOS fonts are unavailable because they are
# stored as ``.dfont`` collections. Also, while matplotlib nominally supports
# ``.ttc`` collections, ProPlot ignores them because figures with ``.ttc`` fonts
# `cannot be saved as PDFs <https://github.com/matplotlib/matplotlib/issues/3135>`__.
# You can get matplotlib to use ``.dfont`` and ``.ttc`` collections by
# expanding them into individual ``.ttf`` files with the
# `DFontSplitter application <https://peter.upfold.org.uk/projects/dfontsplitter>`__,
# then saving the files in-place or in the ``~/.proplot/fonts`` folder.
#
# To find font collections, check the paths listed in ``OSXFontDirectories``,
# ``X11FontDirectories``, ``MSUserFontDirectories``, and ``MSFontDirectories``
# under the `matplotlib.font_manager` module.
| [
"lukelbd@gmail.com"
] | lukelbd@gmail.com |
5ad9089e3a6c24447ab2f79b46959cfe3b4a7c7c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_143/ch27_2020_03_23_13_19_25_574676.py | c03bdeee5a7baf9d85caf9cc133ea720c209e773 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | invalid= True
while invalid:
perg = str(input('Tem dúvidas?')
if perg == não:
invalid= False
print ('Até a próxima')
else:
print ('Pratique mais')
| [
"you@example.com"
] | you@example.com |
f5df3d660900f3cef91489b44b8804976af6e0f4 | e76f47d5e6752b838d5f7e23e22cfef65482b8e1 | /env/bin/gdalchksum.py | 765bd72c6a86cacb639d07cd13700de5e0db7da6 | [] | no_license | AmirIdris/Final-Project | b006adfc4074df6687abaac83942b1b151300a51 | 7b0e28d01b7d5b4e4825d5d8b98ba193bd3f49e8 | refs/heads/master | 2023-06-10T21:13:12.875771 | 2021-07-08T20:23:59 | 2021-07-08T20:23:59 | 362,912,491 | 0 | 1 | null | 2021-07-08T20:24:00 | 2021-04-29T18:34:24 | CSS | UTF-8 | Python | false | false | 333 | py | #!/home/amir/Documents/Projects/FinalProject/env/bin/python3
import sys
# import osgeo.utils.gdalchksum as a convenience to use as a script
from osgeo.utils.gdalchksum import * # noqa
from osgeo.utils.gdalchksum import main
from osgeo.gdal import deprecation_warn
deprecation_warn('gdalchksum', 'utils')
sys.exit(main(sys.argv))
| [
"you@example.com"
] | you@example.com |
43a8958ceb7903ceb9c6b6d5ccebaaebc00206dc | 2db6f646b23c1bdbdf0e6a7b9889725a7eda4e98 | /test/functional/nulldummy.py | d29132890ca14d06c43cfc54ecb99188b66d039a | [
"MIT"
] | permissive | wolfoxonly/bwb | 113964cbce9ae8ce048bfcd81437f7bcfdb22e11 | aae01441cdc171ff7bbdc161b74b4eeb2f1b5a10 | refs/heads/master | 2021-04-30T08:00:28.465159 | 2018-05-21T14:47:53 | 2018-05-21T14:47:53 | 121,363,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,692 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bwbcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
"""
from test_framework.test_framework import BwbcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, network_thread_start
from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
class NULLDUMMYTest(BwbcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness', '-vbparams=segwit:0:999999999999', '-addresstype=legacy']]
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])
self.wit_address = self.nodes[0].addwitnessaddress(self.address)
self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)
network_thread_start()
self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427) # Block 429
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True)
test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True)
test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True)
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47)
trueDummy(test2tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46)
test6txs=[CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test4tx])
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test5tx], True)
self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
for i in test6txs:
self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True)
self.block_submit(self.nodes[0], test6txs, True, True)
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def block_submit(self, node, txs, witness = False, accept = False):
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
# Standard functional-test entry point: run the NULLDUMMY test directly.
if __name__ == '__main__':
    NULLDUMMYTest().main()
| [
"415313577@qq.com"
] | 415313577@qq.com |
2994c87a400699c9154a25a17bbe59a61473a769 | dd9f40550afd05192f04d817fa7b31bbe5945f8a | /app_1/migrations/0006_auto_20210921_1828.py | 52d2866997337ab4f915b145c72565b16af42ed8 | [] | no_license | AbdurRahman111/Boomboom-Official-Project---Official | b8c0220c61e204b8482227ffec3fc0f5ebd69f37 | ff00b702494183e13bc00b634ed33a5203536166 | refs/heads/master | 2023-08-19T04:19:48.334410 | 2021-10-11T12:24:53 | 2021-10-11T12:24:53 | 415,915,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | py | # Generated by Django 3.2.6 on 2021-09-21 12:28
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (makemigrations, 2021-09-21).

    Rewrites the `default` of several date/datetime fields. Note the defaults
    are the literal timestamp captured when the migration was generated
    (datetime.datetime(2021, 9, 21, 18, 28, 44, 951099)), not callables —
    typical of auto-generated Django migrations; do not hand-edit.
    """

    dependencies = [
        ('app_1', '0005_auto_20210921_1635'),
    ]

    operations = [
        migrations.AlterField(
            model_name='campaign_table',
            name='end_time',
            field=models.DateField(blank=True, default=datetime.datetime(2021, 9, 21, 18, 28, 44, 951099), null=True),
        ),
        migrations.AlterField(
            model_name='campaign_table',
            name='start_time',
            field=models.DateField(blank=True, default=datetime.datetime(2021, 9, 21, 18, 28, 44, 951099), null=True),
        ),
        migrations.AlterField(
            model_name='customer_review',
            name='Review_Time',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 9, 21, 18, 28, 44, 951099)),
        ),
        migrations.AlterField(
            model_name='flash_sell',
            name='flash_sell_end_time',
            field=models.DateField(blank=True, default=datetime.datetime(2021, 9, 21, 18, 28, 44, 951099)),
        ),
        migrations.AlterField(
            model_name='flash_sell',
            name='flash_sell_start_time',
            field=models.DateField(blank=True, default=datetime.datetime(2021, 9, 21, 18, 28, 44, 951099)),
        ),
        migrations.AlterField(
            model_name='products',
            name='Time',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 9, 21, 18, 28, 44, 951099)),
        ),
        migrations.AlterField(
            model_name='products',
            name='flash_sell_end_time',
            field=models.DateField(blank=True, default=datetime.datetime(2021, 9, 21, 18, 28, 44, 951099)),
        ),
        migrations.AlterField(
            model_name='products',
            name='flash_sell_start_time',
            field=models.DateField(blank=True, default=datetime.datetime(2021, 9, 21, 18, 28, 44, 951099)),
        ),
        migrations.AlterField(
            model_name='staff_access',
            name='First_Register_Time',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 9, 21, 18, 28, 44, 951099)),
        ),
        migrations.AlterField(
            model_name='staff_access',
            name='Last_login_Time',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 9, 21, 18, 28, 44, 951099)),
        ),
    ]
| [
"mdabdurrahmanchowdhury1122@gmail.com"
] | mdabdurrahmanchowdhury1122@gmail.com |
c91cb1d76c2ecf1d0a7725da338964e214663f33 | 77f7adb86eee479c32fde983996fa35267d76fd5 | /sap/adt/search.py | c7e5462f1c36c16306d91e5e1efe671e65d7702a | [
"Apache-2.0"
] | permissive | jfilak/sapcli | cabd2390de37f70bbe55f0f7e343de123138e8b1 | 2839463fc2e2e2c1f35aa2203e4880d4e54462e7 | refs/heads/master | 2023-08-31T11:15:38.005314 | 2023-08-29T14:03:43 | 2023-08-29T14:30:46 | 158,893,144 | 54 | 26 | Apache-2.0 | 2023-09-11T14:19:10 | 2018-11-24T01:42:11 | Python | UTF-8 | Python | false | false | 809 | py | """Wraps ADT search functionality"""
from sap.adt.objects import ADTObjectReferences
import sap.adt.marshalling
class ADTSearch:
    """Thin wrapper over the ADT repository information-system search."""

    def __init__(self, connection):
        self._connection = connection

    def quick_search(self, term: str, max_results: int = 5) -> ADTObjectReferences:
        """Run an ADT quick object search for *term*, capped at *max_results*."""
        query = {
            'operation': 'quickSearch',
            'maxResults': max_results,
            'query': term,
        }
        response = self._connection.execute(
            'GET',
            'repository/informationsystem/search',
            params=query,
        )
        references = ADTObjectReferences()
        sap.adt.marshalling.Marshal().deserialize(response.text, references)
        return references
| [
"jakub@thefilaks.net"
] | jakub@thefilaks.net |
93800dc28f160d79cf2aae36684a9306099188bd | 1064f70fefad3a49ad75276bc8638310eace6477 | /solution_scripts/serial_scripts/vdns/test_vdns.py | dd713a1d9973e487f10b1b9c3465df220a690380 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | gokulchandrap/contrail-test | d80e1dfe54d191372c2fe9db6bb1ec6997c5022c | 32d6fa43490a3234eb9560839b7d0de06e28b9bb | refs/heads/master | 2021-01-19T10:35:33.821533 | 2017-04-10T21:33:20 | 2017-04-10T21:33:20 | 87,881,182 | 1 | 0 | null | 2017-04-11T02:52:17 | 2017-04-11T02:52:17 | null | UTF-8 | Python | false | false | 4,358 | py | # Need to import path to test/fixtures and test/scripts/
# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/'
#
# To run tests, you can do 'python -m testtools.run vdns_tests'. To run specific tests,
# You can do 'python -m testtools.run -l vdns_tests'
# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD
#
import os
import unittest
import fixtures
import testtools
import traceback
from policy_test import *
from multiple_vn_vm_test import *
from tcutils.wrappers import preposttest_wrapper
from tcutils.pkgs.Traffic.traffic.core.stream import Stream
from tcutils.pkgs.Traffic.traffic.core.profile import create, ContinuousProfile
from tcutils.pkgs.Traffic.traffic.core.helpers import Host
from tcutils.pkgs.Traffic.traffic.core.helpers import Sender, Receiver
from base import BasevDNSRestartTest
from common import isolated_creds
import inspect
from vnc_api import vnc_api
from vnc_api.gen.resource_test import *
from vdns_fixture import *
from floating_ip import *
from policy_test import *
from control_node import *
from user_test import UserFixture
import test
class TestvDNSRestart(BasevDNSRestartTest):
    """Serial vDNS restart tests.

    Each test delegates to ``vdns_with_cn_dns_agent_restart`` (inherited from
    BasevDNSRestartTest), passing the name of the process to restart; the
    shared helper performs steps 1-7 described in each test's docstring.
    """

    @classmethod
    def setUpClass(cls):
        super(TestvDNSRestart, cls).setUpClass()

    def runTest(self):
        # Placeholder so the fixture can be instantiated directly.
        pass
    #end runTest

    @preposttest_wrapper
    def test_vdns_controlnode_switchover(self):
        ''' This test tests control node switchover functionality
            1. Create VDNS server object
            2. Associate VDNS with IPAM
            3. Launch  VN with IPAM
            4. Launch VM with VN Created above. This test verifies on launch of VM agent should update DNS 'A' and 'PTR' records
            5. Ping VMs using VM name
            6. Restart active control node
            7. Ping VMs using VM name
        Pass criteria: Step 4,5 and 7 should pass
        Maintainer: cf-test@juniper.net
        '''
        restart_process = 'ControlNodeRestart'
        self.vdns_with_cn_dns_agent_restart(restart_process)
        return True

    @preposttest_wrapper
    def test_vdns_dns_restart(self):
        ''' This test test dns process restart functionality
            1. Create VDNS server object
            2. Associate VDNS with IPAM
            3. Launch  VN with IPAM
            4. Launch VM with VN Created above. This test verifies on launch of VM agent should update DNS 'A' and 'PTR' records
            5. Ping VMs using VM name
            6. Restart the dns process in the active control node
            7. Ping VMs using VM name
        Pass criteria: Step 4, 5 and 7 should pass
        Maintainer: cf-test@juniper.net
        '''
        restart_process = 'DnsRestart'
        self.vdns_with_cn_dns_agent_restart(restart_process)
        return True

    @preposttest_wrapper
    def test_vdns_agent_restart(self):
        '''This test tests agent process restart functionality
            1. Create VDNS server object
            2. Associate VDNS with IPAM
            3. Launch  VN with IPAM
            4. Launch VM with VN Created above. This test verifies on launch of VM agent should update DNS 'A' and 'PTR' records
            5. Ping VMs using VM name
            6. Restart the agent process in the compute node
            7. Ping VMs using VM name
        Pass criteria: Step 4, 5 and 7 should pass
        Maintainer: cf-test@juniper.net
        '''
        restart_process = 'AgentRestart'
        self.vdns_with_cn_dns_agent_restart(restart_process)
        return True

    @preposttest_wrapper
    def test_vdns_named_restart(self):
        '''This test tests named process restart functionality
            1. Create VDNS server object
            2. Associate VDNS with IPAM
            3. Launch  VN with IPAM
            4. Launch VM with VN Created above. This test verifies on launch of VM agent should update DNS 'A' and 'PTR' records
            5. Ping VMs using VM name
            6. Restart the named process in the active control node
            7. Ping VMs using VM name
        Pass criteria: Step 4, 5 and 7 should pass
        Maintainer: cf-test@juniper.net
        '''
        restart_process = 'NamedRestart'
        self.vdns_with_cn_dns_agent_restart(restart_process)
        return True
# Allow running this test module standalone via unittest.
if __name__ == '__main__':
    unittest.main()
# end of TestVdnsFixture
| [
"vageesant@juniper.net"
] | vageesant@juniper.net |
1b3e443d285d7eea5a39e9ed896c107b52115972 | ef2c8185e291d3e11df9406c14af78fd0a8b5b9a | /getWeather.py | c68590ead3effc53bb1fde481dcd08077b07951c | [] | no_license | davehedengren/weather | c855df8ee51de195b3fca095e2e059bcebeadc51 | 014ca831eadb12faa515c38328eb60273dff5775 | refs/heads/master | 2021-01-01T18:17:42.493236 | 2014-09-20T02:25:00 | 2014-09-20T02:25:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | import datetime
import requests
import json
# NOTE(review): apikey is blank — both CDO API requests below will be
# rejected until a real NOAA token is supplied.
apikey = ""
cdoHeaders = {"token":apikey}

# Hard-coded election dates; built here but not used further in this script.
electionDates = []
electionDates.append(datetime.date(1981,3,18))
electionDates.append(datetime.date(1982,9,8))
electionDates.append(datetime.date(1981,10,10))

#r = requests.get("http://www.ncdc.noaa.gov/cdo-web/api/v2/", headers=cdoHeaders)

# Lat/lon bounding box (presumably the Seattle/Bellevue area — confirm) and
# the date window used for both station discovery and the data pull.
coverageArea="47.5204,-122.2047,47.6139,-122.1065"
startDate=datetime.date(2004,1,1)
endDate=datetime.date(2012,1,1)

# Find the station in the bounding box with the best TMAX/TMIN/TPCP coverage.
r = requests.get("http://www.ncdc.noaa.gov/cdo-web/api/v2/stations?datasetid=GHCND&datatypeid=TMAX&datatypeid=TMIN&datatypeid=TPCP&extent="+coverageArea+"&startdate="+str(startDate)+"&enddate="+str(endDate)+"&sort=datacoverage&sortorder=desc",headers=cdoHeaders)
# NOTE(review): `id` shadows the builtin of the same name.
id = r.json()['results'][0]['id']
# Fetch that station's daily GHCND data for the same window.
r = requests.get("http://www.ncdc.noaa.gov/cdo-web/api/v2/data?datasetid=GHCND&datatypeid=TPCP&datatypeid=TMAX&datatypeid=TMIN&stationid="+id+"&startdate="+str(startDate)+"&enddate="+str(endDate),headers=cdoHeaders)
| [
"james.p.campbell@gmail.com"
] | james.p.campbell@gmail.com |
1786a9be17fe0e1ada8ffd21656dc9b7411dd30c | 0a9949a7dbe5f7d70028b22779b3821c62eb6510 | /static/hb_lcd_static/query_date.py | 9b3c22f222e4c2646872d0d2ac59d8ecadaef8c3 | [] | no_license | 744996162/warehouse | ed34f251addb9438a783945b6eed5eabe18ef5a2 | 3efd299a59a0703a1a092c58a6f7dc2564b92e4d | refs/heads/master | 2020-06-04T22:10:14.727156 | 2015-07-03T09:40:09 | 2015-07-03T09:40:09 | 35,603,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,225 | py | #coding=utf-8
__author__ = 'Administrator'
import datetime
def test():
    """Ad-hoc check: print today's date (YYYYMMDD) and the week-of-year number."""
    today = datetime.datetime.now()
    date_str = today.strftime('%Y%m%d')
    # week=today.isoweekday()
    week = today.strftime("%U")  # week of year, Sunday-first (00..53)
    # Removed unused locals from the original (`weekday` from %w, `x` from
    # `today.replace()`), which were computed but never read.
    print(date_str, week)
def gt_getWeeklyDate(date=None):
    """Return the Sunday before the week eight weeks ago, as 'YYYYMMDD'.

    Bug fix: the original default `date=datetime.datetime.now()` is evaluated
    once at import time, so a long-running process silently keeps using a
    stale "today". Defaulting to None and resolving per call fixes that while
    keeping the call signature backward compatible.
    """
    if date is None:
        date = datetime.datetime.now()
    # isoweekday(): Monday=1 .. Sunday=7, so 56 + weekday days back always
    # lands on a Sunday eight full weeks earlier.
    delta = 56 + date.isoweekday()
    target = date + datetime.timedelta(days=-delta)
    return target.strftime('%Y%m%d')
def gt_getMonthlyDate(date=None):
    """Return the 1st of the previous month as 'YYYYMMDD'.

    Bug fix: replaced the import-time-evaluated default
    `date=datetime.datetime.now()` with a per-call default, and removed the
    stray trailing `pass`.
    """
    if date is None:
        date = datetime.datetime.now()
    # 32 days back is always inside the previous month (max month length 31).
    previous_month = date + datetime.timedelta(days=-32)
    return previous_month.replace(day=1).strftime('%Y%m%d')
def hb_getWeeklyDate(date=None):
    """Return the Monday two weeks ago as 'YYYY-MM-DD'.

    Bug fix: the original default `date=datetime.datetime.now()` was evaluated
    once at import time; it is now resolved on each call.
    """
    if date is None:
        date = datetime.datetime.now()
    # isoweekday(): Monday=1 .. Sunday=7; 6 + weekday days back is the Monday
    # of the week before last.
    delta = 6 + date.isoweekday()
    target = date + datetime.timedelta(days=-delta)
    return target.strftime('%Y-%m-%d')
def hb_getMonthlyDate(date=None):
    """Return the 1st of the month as 'YYYY-MM-DD', after a 7-day grace lag.

    The 7-day step back guards against incomplete data at month start (per the
    original comment). Bug fix: the default date is now computed per call
    instead of once at import time.
    """
    if date is None:
        date = datetime.datetime.now()
    lagged = date + datetime.timedelta(days=-7)
    return lagged.replace(day=1).strftime('%Y-%m-%d')
def hb_getMonthlyDate_new(month_diff, date=None):
    """Return the 1st of the month ~*month_diff* months before *date*.

    Months are approximated as 30 days (matching the original behaviour).
    Bug fix: the default date is now computed per call instead of once at
    import time.
    """
    if date is None:
        date = datetime.datetime.now()
    approx_days = month_diff * 30
    shifted = date + datetime.timedelta(days=-approx_days)
    return shifted.replace(day=1).strftime('%Y-%m-%d')
def hb_getMonthlyDate_lcd(month_diff, date=None):
    """Like hb_getMonthlyDate_new, but single-quoted for SQL interpolation.

    Returns e.g. "'2014-09-01'". Bug fix: the default date is now computed per
    call instead of once at import time.
    """
    if date is None:
        date = datetime.datetime.now()
    approx_days = month_diff * 30
    shifted = date + datetime.timedelta(days=-approx_days)
    first_of_month = shifted.replace(day=1).strftime('%Y-%m-%d')
    return "'" + first_of_month + "'"
if __name__=="__main__":
# today=datetime.datetime.now()
# today=datetime.date(2014,9,21)
# week=gt_getWeeklyDate(today)
# month=gt_getMonthlyDate(today)
# week=hb_getWeeklyDate(today)
# month=hb_getMonthlyDate()
# print(week,month)
date1 = hb_getMonthlyDate_lcd(0)
print(date1)
pass
| [
"744996162@qq.com"
] | 744996162@qq.com |
aabc77683ae4d1a2e9070b2cfc9c0bca517cae46 | 3b6ba8d4dc4dd8fe572c1419709facc7bdc2274e | /ai4water/postprocessing/explain/utils.py | c75c7956c174298a9e5dcb9027b3612b05ed0729 | [
"MIT"
] | permissive | AtrCheema/AI4Water | fd5bfda1eee530e7bc9ed1b2130ed49dd0d5bf89 | ec2a4a426673b11e3589b64cef9d7160b1de28d4 | refs/heads/master | 2023-09-04T10:59:55.902200 | 2023-02-10T15:55:32 | 2023-02-10T15:55:32 | 284,684,202 | 47 | 17 | MIT | 2023-02-10T15:56:43 | 2020-08-03T11:39:22 | Python | UTF-8 | Python | false | false | 3,465 | py |
from ai4water.backend import sklearn_models
def convert_ai4water_model(old_model, framework=None, explainer=None):
    """Translate an ai4water ``Model`` into the underlying native model class
    (sklearn/xgboost/keras-style), returning
    ``(model, framework, explainer, model_name)``.

    Non-ai4water models pass through unchanged with the caller-supplied
    framework/explainer.
    """
    converted = old_model
    name = old_model.__class__.__name__

    is_ai4water_model = name == "Model" and "ai4water" in str(type(old_model))
    if is_ai4water_model:
        if old_model.category == "ML":
            # The concrete estimator name is the single key of config['model'].
            name = list(old_model.config['model'].keys())[0]
            converted, default_explainer = to_native(old_model, name)
            explainer = explainer or default_explainer
            framework = "ML"
        else:
            framework = "DL"
            explainer = explainer or "DeepExplainer"
            if 'functional' in str(type(old_model)):
                converted = functional_to_keras(old_model)

    return converted, framework, explainer, name
def to_native(model, model_name: str):
    """Wrap an ai4water Model so explainers can treat it as the native
    estimator class. Returns ``(wrapped_instance, explainer_name)``.
    """
    # because transformations are part of Model in ai4water, and TreeExplainer
    # is based upon tree structure, it will not consider transformation as part of Model
    if model.config['x_transformation']or model.config['y_transformation']:
        explainer = "KernelExplainer"
    else:
        explainer = "TreeExplainer"

    # Lazily import only the library that actually provides the estimator.
    if model_name.startswith("XGB"):
        import xgboost
        BaseModel = xgboost.__dict__[model_name]
    elif model_name.startswith("LGB"):
        import lightgbm
        BaseModel = lightgbm.__dict__[model_name]
    elif model_name.startswith("Cat"):
        import catboost
        BaseModel = catboost.__dict__[model_name]
    elif model_name in sklearn_models:
        BaseModel = sklearn_models[model_name]
        # sklearn estimators are not tree-explainable in general.
        explainer = "KernelExplainer"
    else:
        raise ValueError

    class DummyModel(BaseModel):
        """First priority is to get attribute from ai4water's Model and then from
        the underlying library's model class."""
        def __getattribute__(self, item):
            # Delegate every lookup to the ai4water Model first.
            return getattr(model, item)

        def __getattr__(self, item):
            # Fallback (reached when the delegation above raises
            # AttributeError): look on the wrapped native model.
            return getattr(model._model, item)

    return DummyModel(), explainer
def get_features(features, features_to_explain):
    """Normalise and validate the requested feature subset.

    Falls back to all *features* when nothing is requested; a single string is
    wrapped in a list. Every requested feature must exist in *features*.
    """
    if features_to_explain is None:
        selected = features
    elif isinstance(features_to_explain, str):
        selected = [features_to_explain]
    else:
        selected = features_to_explain

    assert isinstance(selected, list)
    for feature_name in selected:
        assert feature_name in features
    return selected
def functional_to_keras(old_model):
    """converts the model of functional api to keras model

    If the single output has rank > 2 it is flattened to (None, 1),
    presumably so downstream explainers get a 2-D output — confirm.
    Requires the model to have no x/y transformations configured.
    """
    assert old_model.config['x_transformation'] is None
    assert old_model.config['y_transformation'] is None

    from tensorflow.keras.models import Model
    from tensorflow.keras.layers import Flatten

    # keras model from functional api
    old_model = old_model._model

    old_m_outputs = old_model.outputs
    if isinstance(old_m_outputs, list):
        # Only single-output models are supported.
        assert len(old_m_outputs) == 1
        old_m_outputs = old_m_outputs[0]

    if len(old_m_outputs.shape) > 2:  # (None, ?, ?)
        new_outputs = Flatten()(old_m_outputs)  # (None, ?)
        assert new_outputs.shape.as_list()[-1] == 1  # (None, 1)
        new_model = Model(old_model.inputs, new_outputs)
    else:  # (None, ?)
        assert old_m_outputs.shape.as_list()[-1] == 1  # (None, 1)
        new_model = old_model

    return new_model
"ather_abbas786@yahoo.com"
] | ather_abbas786@yahoo.com |
13116bac9d15d4ef5ffee054babf761735154987 | 329f0e8bf63b8ee7fc55ca1c9ea0a3fcc58bbff5 | /app/views/tournament_view.py | 983d33954773dd9de1ce00ca71b7f49646177206 | [] | no_license | pwgraham91/cratejoy-darts | 6be90ead11e6580d1630d1cf95b1402118200d72 | f6e3eb3dd5f47c2c155dcd85a0d3f46b00eee38d | refs/heads/master | 2022-09-21T10:44:51.908579 | 2017-01-01T02:25:44 | 2017-01-01T02:25:44 | 70,245,826 | 0 | 0 | null | 2022-09-16T17:45:43 | 2016-10-07T12:47:36 | Python | UTF-8 | Python | false | false | 1,814 | py | from datetime import datetime
import json
import flask
from flask_login import login_required
from app import app, db
from app.libs.tournament_lib import make_tournament
from app.models import Tournament, User
@app.route('/tournaments', methods=['GET'])
def tournaments():
    """List every tournament, most recently started first."""
    all_tournaments = (
        db.session.query(Tournament)
        .order_by(Tournament.date_started.desc())
        .all()
    )
    return flask.render_template(
        'tournaments/tournaments.html',
        user=flask.g.user,
        tournaments=all_tournaments,
    )
@app.route('/tournaments/<int:tournament_id>', methods=['GET'])
@login_required
def tournament_get(tournament_id):
    """Render the detail page for one tournament, looked up by primary key."""
    found = db.session.query(Tournament).get(tournament_id)
    return flask.render_template(
        'tournaments/tournament.html',
        user=flask.g.user,
        tournament=found,
    )
@app.route('/tournaments/add', methods=['GET'])
@login_required
def add_tournament_get():
    """Render the tournament-creation form with every user as a candidate player."""
    candidates = db.session.query(User).all()
    return flask.render_template(
        'tournaments/add_tournament.html',
        user=flask.g.user,
        players=candidates,
    )
@app.route('/tournaments/add', methods=['POST'])
@login_required
def add_tournament_post():
    """Create a tournament from the posted JSON payload and return its id."""
    session = db.session
    payload = flask.request.json
    started_on = datetime.strptime(payload['date_started'], '%m/%d/%Y')
    tournament = make_tournament(
        session, started_on, payload['random_draw'], payload['player_ids']
    )
    session.commit()
    body = json.dumps({
        'id': tournament.id,
        'random_draw': tournament.random_draw,
    })
    return flask.Response(body, mimetype=u'application/json')
| [
"pwgraham91@gmail.com"
] | pwgraham91@gmail.com |
9cf53acce71b5556bc2c06b1ab3e397c6972de74 | 11a739cc8dc520c2aa0979236391af43844b4796 | /lti_synchronization/moodle/lti13/auth.py | 7989b7ee4f4dca630b23643626aacab7ba89afab | [] | no_license | antibagr/ncsu-jupyterhub | 872751d8549ee58ebab9a668f22afd835fdffbb0 | 918236870fd95e5ef82ffdf0e3d25cd418b6550e | refs/heads/master | 2023-07-18T01:28:20.971190 | 2021-09-02T19:53:10 | 2021-09-02T19:53:10 | 363,832,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,949 | py | import json
import os
import time
import typing as t
import urllib
import uuid
import jwt
import pem
from Crypto.PublicKey import RSA
from jwcrypto.jwk import JWK
from loguru import logger
from moodle.utils import dump_json
from tornado.httpclient import AsyncHTTPClient, HTTPClientError
async def get_lms_access_token(
        token_endpoint: str,
        private_key_path: str,
        client_id: str,
        scope: t.Optional[str] = None,
) -> dict:
    '''
    Gets an access token from the LMS token endpoint using the private key
    (PEM format) and client id — a client-credentials grant authenticated by
    a signed JWT client assertion.

    Args:
        token_endpoint (str): The url that will be used to make the request
        private_key_path (str): specify where the pem is
        client_id (str): For LTI 1.3 the Client ID that was obtained with the tool setup
        scope (str): space-separated scopes; defaults to the LTI AGS scopes below

    Returns:
        dict: the parsed JSON token response body
        (annotation corrected: the function returns json.loads(...), not str)
    '''
    def _get_params() -> t.Generator:
        '''
        Formatted parameters to send to logger.

        NOTE: closes over token_params/token/scope/params, which are assigned
        later in the enclosing function; each next() is only reached after
        the corresponding name exists, so this is safe (if fragile).
        '''
        yield dump_json({k: str(v) for k, v in token_params.items()})
        yield token[-5:]
        yield dump_json({'scope': scope.split()})
        _dict = {**params, 'client_assertion': params['client_assertion'][-5:]}
        yield dump_json({k: str(v) for k, v in _dict.items()})
        # if 'f_exc' in globals():
        #     yield f_exc.response.body if f_exc.response else f_exc.message
        # else:
        #     yield json.loads(resp.body)

    logger.info('Token endpoint is: %s' % token_endpoint)

    # JWT assertion claims; iat is skewed 5s into the past for clock drift.
    token_params = {
        'iss': client_id,
        'sub': client_id,
        'aud': token_endpoint,
        'iat': int(time.time()) - 5,
        'exp': int(time.time()) + 60,
        'jti': str(uuid.uuid4()),
    }

    _params = _get_params()

    logger.debug('Getting lms access token with parameters\n%s' % next(_params))

    # get the pem-encoded content
    private_key = get_pem_text_from_file(private_key_path)

    headers = get_headers_to_jwt_encode(private_key)

    token = jwt.encode(token_params, private_key,
                       algorithm='RS256', headers=headers)
    logger.debug('Obtaining token %s' % next(_params))

    # Default to the standard LTI Assignment & Grade Services scopes.
    scope: str = scope or ' '.join([
        'https://purl.imsglobal.org/spec/lti-ags/scope/score',
        'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem',
        'https://purl.imsglobal.org/spec/lti-ags/scope/result.readonly',
        'https://purl.imsglobal.org/spec/lti-ags/scope/lineitem.readonly',
    ])

    logger.debug('Scope is %s' % next(_params))

    params = {
        'grant_type': 'client_credentials',
        'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
        'client_assertion': token,
        'scope': scope,
    }

    logger.debug('OAuth parameters are:\n\n%s' % next(_params))

    client = AsyncHTTPClient()
    # NOTE(review): the file only does `import urllib`; urllib.parse works
    # here because another import (tornado) loads it — prefer an explicit
    # `import urllib.parse`.
    body = urllib.parse.urlencode(params)

    try:
        resp = await client.fetch(token_endpoint, method='POST', body=body, headers=None)
    except HTTPClientError as f_exc:
        # NOTE(review): the ternary binds around the whole %-expression, so
        # when there is no response only f_exc.message (without the prefix)
        # is logged — confirm this is intended.
        logger.info('Error by obtaining a token with lms. Detail: %s' % f_exc.response.body if f_exc.response else f_exc.message)
        raise
    else:
        logger.debug('Token response body is %s' % json.loads(resp.body))
        return json.loads(resp.body)
def get_jwk(public_key: str) -> dict:
    '''
    Export the public JWK for a PEM-encoded public key, marked as an
    RS256 signing key.

    Args:
        public_key (str): PEM-encoded public key content (as passed to
            JWK.from_pem), not a file path

    Returns:
        dict: the public JWK with 'alg' and 'use' populated
    '''
    exported = JWK.from_pem(public_key).export_public()
    return {**json.loads(exported), 'alg': 'RS256', 'use': 'sig'}
def get_headers_to_jwt_encode(private_key_text: str) -> t.Optional[dict]:
    '''
    Build the headers dict for jwt.encode from a private key's public part.

    Args:
        private_key_text (str): The PEM-Encoded content as text

    Returns:
        dict: {'kid': ...} when the public key and its JWK can be derived,
        otherwise None
    '''
    public_key = RSA.importKey(private_key_text).publickey().exportKey()
    if not public_key:
        return None
    jwk = get_jwk(public_key)
    return {'kid': jwk.get('kid')} if jwk else None
def get_pem_text_from_file(private_key_path: str) -> str:
    '''
    Parse the PEM file and return its first object as unicode text.

    Checks readability, parses the file into PEM objects, and returns the
    first one as plain text.

    Args:
        private_key_path (str): Path to the private key file.

    Returns:
        str: Text of the first parsed PEM object

    Raises:
        PermissionError: PEM file is not readable
        ValueError: PEM file contains no PEM objects
    '''
    if not os.access(private_key_path, os.R_OK):
        # Fix: the original raised a bare PermissionError() with no message;
        # include the path so failures are diagnosable.
        raise PermissionError('PEM file is not readable: %s' % private_key_path)

    certs = pem.parse_file(private_key_path)
    if not certs:
        raise ValueError('Invalid pem file.')

    return certs[0].as_text()
| [
"antibagr@yandex.ru"
] | antibagr@yandex.ru |
fa8833eb0e33cf9eddba27b06d19308957a69f7c | 7f92c2fc131ca637d8b7c2a4dbba4b974884e786 | /lab4/plottingScripts/plot2.py | cfaf4f0ad69dc2e50527e08f8c30a7aea145732f | [] | no_license | byronwasti/CircuitsLabs | 2c5694f07a59adedddde361d0a85a690a83e096b | be1227c504ed1a2b81b6d670cbaa45d4b8be8e17 | refs/heads/master | 2020-05-23T11:15:14.853587 | 2017-09-03T18:53:50 | 2017-09-03T18:53:50 | 80,369,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,018 | py |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import csv
# CSV sweeps for the four transistors measured in experiment 1.
FILENAME1 = "../data/experiment1_Transistor1_1.csv"
FILENAME2 = "../data/experiment1_Transistor2_1.csv"
FILENAME3 = "../data/experiment1_Transistor3_1.csv"
FILENAME4 = "../data/experiment1_Transistor4_1.csv"
FILENAMES = [FILENAME1, FILENAME2, FILENAME3, FILENAME4]

# Per-transistor columns: base voltage, collector current, (row[2]-row[1]).
DATAX = [ [], [], [], [] ]
DATAY1 = [ [], [], [], [] ]
DATAY2 = [ [], [], [], [] ]

for j, FILENAME in enumerate(FILENAMES):
    with open(FILENAME, 'r') as f:
        reader = csv.reader(f)
        print(j)
        for i, row in enumerate(reader):
            if i == 0 : continue  # skip header row
            # Keep only the 0.4 V - 0.9 V base-voltage window.
            if float(row[0]) > 0.9 : continue
            if float(row[0]) < 0.4 : continue
            DATAX[j].append(float(row[0]))
            DATAY1[j].append(float(row[1]))
            DATAY2[j].append(float(row[2]) - float(row[1]))

# Point-wise mean across the four transistors.
AVGX = []
AVGY1 = []
AVGY2 = []
for i in range(len(DATAX[0])):
    x = 0
    y1 = 0
    y2 = 0
    for j in range(4):
        x += DATAX[j][i]
        y1 += DATAY1[j][i]
        y2 += DATAY2[j][i]
    AVGX.append(x/4)
    AVGY1.append(y1/4)
    AVGY2.append(y2/4)

# Percent difference of each transistor from the mean, point by point.
PER_DIFF_X = [ [], [], [], [] ]
PER_DIFF_Y1 = [ [], [], [], [] ]
PER_DIFF_Y2 = [ [], [], [], [] ]
for i in range(4):
    for j,x in enumerate(DATAX[i]):
        PER_DIFF_X[i].append( 100 * abs(AVGX[j] - x ) / AVGX[j] )
    for j,y in enumerate(DATAY1[i]):
        PER_DIFF_Y1[i].append( 100 * abs(AVGY1[j] - y ) / AVGY1[j] )
    for j,y in enumerate(DATAY2[i]):
        PER_DIFF_Y2[i].append( 100 * abs(AVGY2[j] - y ) / AVGY2[j] )

#for i in range(4):
    #plt.semilogy(DATAX[i], DATAY1[i], '.', label="Transistor %i" % i)
#plt.semilogy(AVGX, AVGY1, '.', label="Transistor AVG")

for i in range(4):
    # Bug fix: the data files are numbered 1-4 but the legend used the
    # 0-based loop index; label transistors 1-4 to match the source files.
    plt.plot(AVGX, PER_DIFF_Y1[i], '.', label="Transistor %i" % (i + 1))

plt.xlabel("Base Voltage (V)")
plt.ylabel("Percent Difference from Mean (%)")
plt.title("Collector Current Percent Difference from Mean Value as a Function of Base Voltage")
plt.legend()
plt.show()
| [
"byron.wasti@gmail.com"
] | byron.wasti@gmail.com |
735d3bc443ff036ece35936f200ed7336e1323b7 | 230b4ce17bf74ca2d7c054ab124e0f8ca49a99cb | /test/core/test_alarm.py | 3b8ac654c9e45b9ff07fe2daa01555c0573366b2 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ulrikpedersen/malcolm | e827e6829881bfa71366231fd7dfe535709b7d4d | e59a095f9fc4be65f931e728a24919843b8f7fa9 | refs/heads/master | 2021-01-16T21:14:35.975923 | 2015-09-14T08:24:28 | 2015-09-14T08:24:28 | 42,232,223 | 0 | 0 | null | 2015-09-10T08:26:40 | 2015-09-10T08:26:39 | Python | UTF-8 | Python | false | false | 686 | py | #!/bin/env dls-python
import unittest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from malcolm.core.alarm import Alarm, AlarmSeverity, AlarmStatus
class AlarmTest(unittest.TestCase):
    """Unit tests for the malcolm Alarm helper."""

    def test_ok(self):
        # Alarm.ok() is the no-alarm sentinel with fixed fields.
        alarm = Alarm.ok()
        self.assertEqual(alarm.status, AlarmStatus.noStatus)
        self.assertEqual(alarm.severity, AlarmSeverity.noAlarm)
        self.assertEqual(alarm.message, "No alarm")

    def test_eq(self):
        # An identically constructed alarm compares equal to Alarm.ok().
        equivalent = Alarm(
            AlarmSeverity.noAlarm, AlarmStatus.noStatus, "No alarm")
        self.assertEqual(Alarm.ok(), equivalent)
unittest.main(verbosity=2)
| [
"tom.cobb@diamond.ac.uk"
] | tom.cobb@diamond.ac.uk |
9783a9f8f220edbd0af2839fc5158d88e273bd04 | d700b9ad1e0b7225871b65ce0dafb27fb408c4bc | /students/k3343/practical_works/Nazarenko_Uliana/Pr_3/Django_project_Nazarenko/django_project_Nazarenko/asgi.py | 4d4c025f140af53b821adf407bb5757598fc0b81 | [
"MIT"
] | permissive | TonikX/ITMO_ICT_WebProgramming_2020 | a8c573ed467fdf99327777fb3f3bfeee5714667b | ba566c1b3ab04585665c69860b713741906935a0 | refs/heads/master | 2023-01-11T22:10:17.003838 | 2020-10-22T11:22:03 | 2020-10-22T11:22:03 | 248,549,610 | 10 | 71 | MIT | 2023-01-28T14:04:21 | 2020-03-19T16:18:55 | Python | UTF-8 | Python | false | false | 425 | py | """
ASGI config for django_project_Nazarenko project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings (unless already configured),
# then build the module-level ASGI callable servers look for.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project_Nazarenko.settings')
application = get_asgi_application()
| [
"noreply@github.com"
] | TonikX.noreply@github.com |
82d3528e277282de287161d0a34771658023f07d | 78171e8cfbc44c547ee07d6e5a85e595fb7397a1 | /shortener/management/commands/refreshcodes.py | 2c1a193f0de96c390f8ef0d6d8cdbdab70edd8df | [] | no_license | jimpalowski/URLshortener | 37b41a3818679c1e0707f02f57147e87a651063c | f7b8450ce2e858dff1e6fec11f9fd5dfec3d3e26 | refs/heads/master | 2021-09-05T11:44:36.564719 | 2018-01-27T01:59:56 | 2018-01-27T01:59:56 | 118,685,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | from django.core.management.base import BaseCommand, CommandError
from shortener.models import KirrURL
class Command(BaseCommand):
    """Management command that regenerates KirrURL short codes."""
    help = 'Refreshes all KirrURL short codes'

    def add_arguments(self, parser):
        # Optional cap on how many objects are refreshed; options['items']
        # is None when --items is not passed (semantics of None are defined
        # by the manager's refresh_shortcodes — confirm there).
        parser.add_argument('--items', type=int)

    def handle(self, *args, **options):
        return KirrURL.objects.refresh_shortcodes(items=options['items'])
| [
"palowskijim@gmail.com"
] | palowskijim@gmail.com |
2ce3692b4eb26443c3c108c0237eac10b20f7cc4 | 9879e2692b74928b0e23e485846f49558cd4b5d2 | /actions/weather/xinzhi_api.py | ecf4fd123da7bdafbefec41514be9acab5da37d0 | [] | no_license | xfzhu2003/Chatbot_RASA | 153f55e09aa737c71b4ec68ad7dd90a4a6bcfa2b | 36ad035b0f0498ec743fbe140caad9e26bb3b8de | refs/heads/master | 2020-06-29T04:32:27.638560 | 2019-07-23T08:43:59 | 2019-07-23T08:43:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | # -*- coding: utf-8 -*-
'''
@Author : Xu
@Software: PyCharm
@File : xinzhi_api.py
@Time : 2019-07-23 14:06
@Desc :
'''
import os
import requests
import json
KEY = 'Sq6NfAburbGs9MGQc'  # API key  NOTE(review): secret committed to source
UID = ""  # user ID. TODO: unused for now; the signature-based auth scheme will need it
LOCATION = 'beijing'  # queried location: city pinyin, v3 ID, lat/lon, etc.
API = 'https://api.seniverse.com/v3/weather/now.json'  # API URL; can be swapped for another endpoint
UNIT = 'c'  # temperature unit (Celsius)
LANGUAGE = 'zh-Hans'  # language of the returned results
# Example request:
# https://api.seniverse.com/v3/weather/now.json?key=your_key&location=beijing&language=zh-Hans&unit=c
def fetch_weather(location, start=0, days=15):
    """Query the Seniverse "now" endpoint for *location*; return parsed JSON.

    *start*/*days* are accepted for compatibility but not sent — the
    corresponding query parameters are commented out below.
    """
    query = {
        'key': KEY,
        'location': location,
        'language': LANGUAGE,
        'unit': UNIT,
        # 'start': start,
        # 'days': days
    }
    response = requests.get(API, params=query, timeout=2)
    return response.json()
def get_weather_by_day(location, day=1):
    """Return {'location': ..., 'result': ...} for the current conditions.

    NOTE(review): *day* is currently unused — fetch_weather only hits the
    "now" endpoint, so every call returns today's weather regardless of
    *day*. Confirm whether daily-forecast support was intended.
    """
    result = fetch_weather(location)
    # Flatten the API envelope: results[0] carries location + current data.
    normal_result = {
        "location": result["results"][0]["location"],
        "result": result["results"][0]["now"]
    }
    return normal_result
if __name__ == '__main__':
    # Smoke-test both helpers against a sample city (Hefei).
    default_location = "合肥"
    result = fetch_weather(default_location)
    print(json.dumps(result, ensure_ascii=False))
    default_location = "合肥"
    result = get_weather_by_day(default_location)
    print(json.dumps(result, ensure_ascii=False))
| [
"xushengquan@souche.com"
] | xushengquan@souche.com |
7cbe2c458a246403bcdf0ad805dd4894b73f157b | 99bb0330d8b3a4c4403dd92d4236a809c55ca084 | /home/migrations/0001_load_initial_data.py | ac20a343b665405d955a91bbc18399a518073928 | [] | no_license | crowdbotics-apps/william-bucks-28639 | 2f329c5186d4214bd0c8fca955d1e66299872cc3 | 153ad30fa5661fb9f64a5312865d724df5f919fa | refs/heads/master | 2023-06-16T08:55:22.588514 | 2021-07-09T02:21:40 | 2021-07-09T02:21:40 | 384,298,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | from django.db import migrations
def create_site(apps, schema_editor):
    """Seed/refresh the default django.contrib.sites row (pk=1).

    Sets the site name, and the domain when a custom domain is configured.
    """
    Site = apps.get_model("sites", "Site")
    custom_domain = "william-bucks-28639.botics.co"

    defaults = {"name": "william bucks"}
    if custom_domain:
        defaults["domain"] = custom_domain

    Site.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
ae4c966261d574ffb508b56d8535f538f0a77c37 | 56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a | /applications/CoSimulationApplication/tests/co_sim_io_py_exposure_aux_files/import_export_data.py | 5ff618370c8074b421e087465b78ac9d99e43fa5 | [
"BSD-3-Clause"
] | permissive | KratosMultiphysics/Kratos | 82b902a2266625b25f17239b42da958611a4b9c5 | 366949ec4e3651702edc6ac3061d2988f10dd271 | refs/heads/master | 2023-08-30T20:31:37.818693 | 2023-08-30T18:01:01 | 2023-08-30T18:01:01 | 81,815,495 | 994 | 285 | NOASSERTION | 2023-09-14T13:22:43 | 2017-02-13T10:58:24 | C++ | UTF-8 | Python | false | false | 1,187 | py | from KratosMultiphysics.CoSimulationApplication import CoSimIO
connection_settings = CoSimIO.Info()
connection_settings.SetString("my_name", "impExp")
connection_settings.SetString("connect_to", "ExpImp")
connection_settings.SetInt("echo_level", 0)
info = CoSimIO.Connect(connection_settings)
connection_name = info.GetString("connection_name")
if info.GetInt("connection_status") != CoSimIO.ConnectionStatus.Connected:
raise Exception("Connecting failed")
import_info = CoSimIO.Info()
import_info.SetString("connection_name", connection_name)
import_info.SetString("identifier", "data_exchange_1")
imported_values = CoSimIO.DoubleVector()
CoSimIO.ImportData(import_info, imported_values)
# print(imported_values)
export_info = CoSimIO.Info()
export_info.SetString("connection_name", connection_name)
export_info.SetString("identifier", "data_exchange_2")
CoSimIO.ExportData(export_info, imported_values)
disconnect_settings = CoSimIO.Info()
disconnect_settings.SetString("connection_name", connection_name)
info = CoSimIO.Disconnect(disconnect_settings)
if info.GetInt("connection_status") != CoSimIO.ConnectionStatus.Disconnected:
raise Exception("Disconnecting failed")
| [
"philipp.bucher@tum.de"
] | philipp.bucher@tum.de |
d3aebf637485f84f291c10de24bda836d6fc353d | 141b42d9d72636c869ff2ce7a2a9f7b9b24f508b | /myvenv/Lib/site-packages/phonenumbers/data/region_SV.py | 980554e3e4441bc01fad20444684d67ad47c5caf | [
"BSD-3-Clause"
] | permissive | Fa67/saleor-shop | 105e1147e60396ddab6f006337436dcbf18e8fe1 | 76110349162c54c8bfcae61983bb59ba8fb0f778 | refs/heads/master | 2021-06-08T23:51:12.251457 | 2018-07-24T08:14:33 | 2018-07-24T08:14:33 | 168,561,915 | 1 | 0 | BSD-3-Clause | 2021-04-18T07:59:12 | 2019-01-31T17:00:39 | Python | UTF-8 | Python | false | false | 1,172 | py | """Auto-generated file, do not edit by hand. SV metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SV = PhoneMetadata(id='SV', country_code=503, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[267]\\d{7}|[89]\\d{6}(?:\\d{4})?', possible_length=(7, 8, 11)),
fixed_line=PhoneNumberDesc(national_number_pattern='2[1-6]\\d{6}', example_number='21234567', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='[67]\\d{7}', example_number='70123456', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{4}(?:\\d{4})?', example_number='8001234', possible_length=(7, 11)),
premium_rate=PhoneNumberDesc(national_number_pattern='900\\d{4}(?:\\d{4})?', example_number='9001234', possible_length=(7, 11)),
number_format=[NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[267]']),
NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[89]']),
NumberFormat(pattern='(\\d{3})(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[89]'])])
| [
"gruzdevasch@gmail.com"
] | gruzdevasch@gmail.com |
6f7fc741072e85fe7ff40725f2a06199a7566dad | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2429/60755/316888.py | 9b05f45702d6ac80a070303aa58fb869df6cda0a | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | a = input()
b = input()
c = input()
d = input()
e = input()
if c =="2 3 10 6 4 8 1":
print(8)
print(2)
elif c =="2 5 9 6 4 8 6" and e == "7 9 5 6 3 2" or c == "2 3 9 6 4 8 1":
print(7)
print(2)
elif c=="2 5 9 6 4 8 6":
print(7)
print(1)
elif c=="2 5 9 6 4 8 1":
print(7)
print(2)
else:
print(c) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
6d15a624bd2e58101f01b25a91efb3a7eed586f3 | 2e6cc958f1c95a7a698aaf41f8a0454b5d67e933 | /project/settings_example.py | d1a5a507c39e58906b883e7837a4e068d6f7af67 | [] | no_license | greenteamer/sp | aac53970fe77b49d9cac14c90ec3b57cce8585b4 | 93b400e3d92c5e3933a2225c17033f244da65859 | refs/heads/master | 2021-01-01T18:08:00.818208 | 2015-09-03T07:18:42 | 2015-09-03T07:18:42 | 30,750,314 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | import os
# File for storing custom settings
CURRPATH = os.path.abspath('.')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sp',
'USER': 'root',
'PASSWORD': 'balabas',
'HOST': '',
'PORT': '',
'TEST_CHARSET': 'UTF8',
}
}
# DEBUG_TOOLBAR_PATCH_SETTINGS = False
ADMIN_EMAIL = 'greenteamer@bk.ru'
ACCOUNT_ACTIVATION_DAYS = 2
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': [
['Source', '-', 'Save', 'NewPage', 'DocProps',
'Preview', 'Print', '-', 'Templates'],
['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord',
'-', 'Undo', 'Redo'],
['Find', 'Replace', '-', 'SelectAll', '-', 'SpellChecker'],
['Image', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar'],
['Bold', 'Italic', 'Underline', 'Strike', 'Subscript',
'Superscript', '-', 'RemoveFormat'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-',
'Blockquote', '-', 'JustifyLeft', 'JustifyCenter',
'JustifyRight', 'JustifyBlock', '-', 'BidiLtr', 'BidiRtl'],
['Link', 'Unlink'],
['Styles', 'Format', 'Font', 'FontSize'],
['TextColor', 'BGColor'],
['Maximize', 'ShowBlocks', 'CreateDiv'],
],
'width': 100%,
'height': 500,
},
'interface': {
'toolbar': [
['Source', '-', 'Save', 'NewPage', 'DocProps',
'Preview', 'Print', '-', 'Templates'],
['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord',
'-', 'Undo', 'Redo'],
['Find', 'Replace', '-', 'SelectAll', '-', 'SpellChecker'],
['Image', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar'],
['Bold', 'Italic', 'Underline', 'Strike', 'Subscript',
'Superscript', '-', 'RemoveFormat'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-',
'Blockquote', '-', 'JustifyLeft', 'JustifyCenter',
'JustifyRight', 'JustifyBlock', '-', 'BidiLtr', 'BidiRtl'],
['Link', 'Unlink'],
['Styles', 'Format', 'Font', 'FontSize'],
['TextColor', 'BGColor'],
['Maximize', 'ShowBlocks', 'CreateDiv'],
],
'width': 775,
'height': 500,
},
}
AUTH_USER_EMAIL_UNIQUE = True
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'teamer777@gmail.com'
EMAIL_HOST_PASSWORD = 'greenteamer1986'
EMAIL_PORT = 587
| [
"greenteamer@bk.ru"
] | greenteamer@bk.ru |
e25be79a841a2898e6a9ed529697f15b982b37a6 | ecd630f54fefa0a8a4937ac5c6724f9a3bb215c3 | /projeto/emprestimo/migrations/0041_emprestimo_taxa.py | 006336203fdc426f2b812eb430c7525507c2e35f | [] | no_license | israelwerther/Esctop_Israel_Estoque | 49968751464a38c473298ed876da7641efedf8de | d6ab3e502f2a97a0d3036351e59c2faa267c0efd | refs/heads/master | 2023-01-07T20:21:38.381593 | 2020-11-12T17:35:14 | 2020-11-12T17:35:14 | 258,642,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # Generated by Django 3.0.7 on 2020-11-10 18:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('emprestimo', '0040_taxa_taxa_juros_a_m2'),
]
operations = [
migrations.AddField(
model_name='emprestimo',
name='taxa',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='emprestimo.Taxa'),
),
]
| [
"israelwerther48@outlook.com"
] | israelwerther48@outlook.com |
a577c88596007bb1886c9537651e16277de23926 | 8efe56ee34c455a6b1336897f6d457acbc9c10f9 | /src/metarl/tf/algos/rl2trpo.py | 1cd270c1df4c36602a1c34da1644196bccbf7cf9 | [
"MIT"
] | permissive | neurips2020submission11699/metarl | ab18d11e708bf569d76cb2fab2bcce089badd111 | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | refs/heads/master | 2022-10-15T22:03:09.948673 | 2020-06-11T19:22:55 | 2020-06-11T19:30:58 | 268,410,657 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,244 | py | """Trust Region Policy Optimization for RL2."""
from metarl.tf.algos import RL2
from metarl.tf.optimizers import ConjugateGradientOptimizer
from metarl.tf.optimizers import PenaltyLbfgsOptimizer
class RL2TRPO(RL2):
"""Trust Region Policy Optimization specific for RL^2.
See https://arxiv.org/abs/1502.05477.
Args:
rl2_max_path_length (int): Maximum length for trajectories with respect
to RL^2. Notice that it is different from the maximum path length
for the inner algorithm.
meta_batch_size (int): Meta batch size.
task_sampler (metarl.experiment.TaskSampler): Task sampler.
env_spec (metarl.envs.EnvSpec): Environment specification.
policy (metarl.tf.policies.StochasticPolicy): Policy.
baseline (metarl.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in metarl.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
kl_constraint (str): KL constraint, either 'hard' or 'soft'.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
meta_evaluator (metarl.experiment.MetaEvaluator): Evaluator for meta-RL
algorithms.
n_epochs_per_eval (int): If meta_evaluator is passed, meta-evaluation
will be performed every `n_epochs_per_eval` epochs.
name (str): The name of the algorithm.
"""
def __init__(self,
rl2_max_path_length,
meta_batch_size,
task_sampler,
env_spec,
policy,
baseline,
scope=None,
max_path_length=500,
discount=0.99,
gae_lambda=0.98,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer=None,
optimizer_args=None,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
use_neg_logli_entropy=False,
stop_entropy_gradient=False,
kl_constraint='hard',
entropy_method='no_entropy',
flatten_input=True,
meta_evaluator=None,
n_epochs_per_eval=10,
name='TRPO'):
if not optimizer:
if kl_constraint == 'hard':
optimizer = ConjugateGradientOptimizer
elif kl_constraint == 'soft':
optimizer = PenaltyLbfgsOptimizer
else:
raise ValueError('Invalid kl_constraint')
if optimizer_args is None:
optimizer_args = dict()
super().__init__(rl2_max_path_length=rl2_max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=task_sampler,
env_spec=env_spec,
policy=policy,
baseline=baseline,
scope=scope,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon,
pg_loss='surrogate',
lr_clip_range=lr_clip_range,
max_kl_step=max_kl_step,
optimizer=optimizer,
optimizer_args=optimizer_args,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
use_neg_logli_entropy=use_neg_logli_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method,
flatten_input=flatten_input,
meta_evaluator=meta_evaluator,
n_epochs_per_eval=n_epochs_per_eval,
name=name)
| [
"neurips2020submission11699@gmail.com"
] | neurips2020submission11699@gmail.com |
a06d447e530df7be2ae057e8a8a13cf2100786c7 | 06a2dab18197a13fc3371debd29b476ae99cb01c | /Monotop/python/Selection.py | 8868a7dc5175ea6bffe6870ec897aa76ca92447c | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | PandaPhysics/PandaAnalysis | 397a031f9e8d399be1814ab04dd525d69b41f060 | 3167d106d41dfce58219c3e07d30e201ee823b55 | refs/heads/master | 2021-06-18T13:52:57.650900 | 2019-04-08T17:35:29 | 2019-04-08T17:35:29 | 168,376,672 | 0 | 0 | NOASSERTION | 2019-04-08T17:33:55 | 2019-01-30T16:34:09 | C++ | UTF-8 | Python | false | false | 4,824 | py | from PandaCore.Tools.Misc import *
from re import sub
triggers = {
'met':'(trigger&1)!=0',
'ele':'(trigger&2)!=0',
'pho':'(trigger&4)!=0',
}
metFilter='metFilter==1'
topTagSF = '%f*(fj1IsMatched==1)+%f*(fj1IsMatched==0)'%(1.007,1.02)
ak4bTagSF = 'sf_btag0*(isojetNBtags==0)+sf_btag1*(isojetNBtags==1)+1*(isojetNBtags>1)'
photonSF = '0.93'
presel = 'nFatJet==1 && fj1Pt>250 && TMath::Abs(fj1Eta)<2.4 && fj1Tau32<0.61 && 110<fj1MSD && fj1MSD<210'
cuts = {
# analysis regions
'signal' : tAND(metFilter,tAND(presel,'pfmet>175 && puppimet>250 && dphipuppimet>1.1 && (nLooseMuon+nLooseElectron+nLoosePhoton+nTau)==0 && fj1MaxCSV>0.46 && isojetNBtags==0')),
# 'signal_nomf' : tAND(presel,'met>175 && puppimet>250 && dphipuppimet>1.1 && (nLooseMuon+nLooseElectron+nLoosePhoton+nTau)==0 && fj1MaxCSV>0.46 && isojetNBtags==0 && fj1isTight==1 && TMath::Abs(met-calomet)/puppimet<0.5'),
'singlemuontop' : tAND(metFilter,tAND(presel,'UWmag>250 && (nLooseElectron+nLoosePhoton+nTau)==0 && nLooseMuon==1 && looseLep1IsTight==1 && fj1MaxCSV>0.46 && isojetNBtags==1')),
'singleelectrontop' : tAND(metFilter,tAND(presel,'UWmag>250 && (nLooseMuon+nLoosePhoton+nTau)==0 && nLooseElectron==1 && looseLep1IsTight==1 && fj1MaxCSV>0.46 && isojetNBtags==1 && puppimet>40')),
'singlemuonw' : tAND(metFilter,tAND(presel,'UWmag>250 && (nLooseElectron+nLoosePhoton+nTau)==0 && nLooseMuon==1 && looseLep1IsTight==1 && fj1MaxCSV<0.46 && isojetNBtags==0')),
'singleelectronw' : tAND(metFilter,tAND(presel,'UWmag>250 && (nLooseMuon+nLoosePhoton+nTau)==0 && nLooseElectron==1 && looseLep1IsTight==1 && fj1MaxCSV<0.46 && isojetNBtags==0 && puppimet>40')),
'dimuon' : tAND(metFilter,tAND(presel,'UZmag>250 && (nLooseElectron+nLoosePhoton+nTau)==0 && nLooseMuon==2 && looseLep1IsTight==1')),
'dielectron' : tAND(metFilter,tAND(presel,'UZmag>250 && (nLooseMuon+nLoosePhoton+nTau)==0 && nLooseElectron==2 && looseLep1IsTight==1')),
'photon' : tAND(metFilter,tAND(presel,'UAmag>250 && (nLooseMuon+nLooseElectron+nTau)==0 && nLoosePhoton==1 && loosePho1IsTight==1')),
}
tag_presel = removeCut(removeCut(tOR(cuts['singlemuontop'],cuts['singleelectrontop']),'fj1Tau32'),'fj1MSD')
mistag_presel = tAND(removeCut(removeCut(cuts['photon'],'fj1Tau32'),'fj1MSD'),'fj1MSD>40')
tag = 'fj1Tau32<0.61 && 110<fj1MSD && fj1MSD<210'
tt_cuts = {
'tag' : tag_presel,
'tag_pass' : tAND(tag,tag_presel),
'tag_fail' : tAND(tNOT(tag),tag_presel),
'mistag' : mistag_presel,
'mistag_pass' : tAND(tag,mistag_presel),
'mistag_fail' : tAND(tNOT(tag),mistag_presel),
}
'''
'signal' : tTIMES(tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_ewkZ*sf_qcdZ*sf_ewkW*sf_qcdW*sf_ewkA*sf_qcdA*sf_tt*sf_sjbtag1*sf_lepTrack',topTagSF),ak4bTagSF),
'top' : tTIMES(tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_ewkZ*sf_qcdZ*sf_ewkW*sf_qcdW*sf_ewkA*sf_qcdA*sf_tt*sf_sjbtag1*sf_lepTrack',topTagSF),ak4bTagSF),
'w' : tTIMES(tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_ewkZ*sf_qcdZ*sf_ewkW*sf_qcdW*sf_ewkA*sf_qcdA*sf_tt*sf_sjbtag0*sf_lepTrack',topTagSF),ak4bTagSF),
'notag' : tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_ewkZ*sf_qcdZ*sf_ewkW*sf_qcdW*sf_ewkA*sf_qcdA*sf_tt*sf_lepTrack',topTagSF),
'signal_sf' : tTIMES(tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_ewkZ*sf_qcdZ*sf_ewkW*sf_qcdW*sf_ewkA*sf_qcdA*sf_tt*sf_sjbtag1',topTagSF),ak4bTagSF),
'top_sf' : tTIMES(tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_ewkZ*sf_qcdZ*sf_ewkW*sf_qcdW*sf_ewkA*sf_qcdA*sf_tt*sf_sjbtag1',topTagSF),ak4bTagSF),
'w_sf' : tTIMES(tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_ewkZ*sf_qcdZ*sf_ewkW*sf_qcdW*sf_ewkA*sf_qcdA*sf_tt*sf_sjbtag0',topTagSF),ak4bTagSF),
'''
weights = {
# analysis weights
'signal' : tTIMES(tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_ewkV*sf_qcdV*sf_tt*sf_sjbtag1',topTagSF),ak4bTagSF),
'top' : tTIMES(tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_lepTrack*sf_ewkV*sf_qcdV*sf_tt*sf_sjbtag1',topTagSF),ak4bTagSF),
'w' : tTIMES(tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_lepTrack*sf_ewkV*sf_qcdV*sf_tt*sf_sjbtag0',topTagSF),ak4bTagSF),
'notag' : tTIMES('%f*normalizedWeight*sf_pu*sf_lep*sf_lepTrack*sf_ewkV*sf_qcdV*sf_tt',topTagSF),
}
for x in ['singlemuontop','singleelectrontop']:
weights[x] = weights['top']
for x in ['singlemuonw','singleelectronw']:
weights[x] = weights['w']
for x in ['dimuon','dielectron']:
weights[x] = weights['notag']
for x in ['photon']:
weights[x] = tTIMES(photonSF,weights['notag'])
for r in ['signal','top','w','singlemuontop','singleelectrontop','singlemuonw','singleelectronw']:
for shift in ['BUp','BDown','MUp','MDown']:
for cent in ['sf_btag','sf_sjbtag']:
weights[r+'_'+cent+shift] = sub(cent+'0',cent+'0'+shift,sub(cent+'1',cent+'1'+shift,weights[r]))
| [
"sidn@mit.edu"
] | sidn@mit.edu |
3c945e482069d67f3eaa4d7602f1236c2061ba23 | 11aac6edab131293027add959b697127bf3042a4 | /smallestEqual.py | 6cd1e74db75a34d00ef3d959ad69280557da3d9b | [] | no_license | jdanray/leetcode | a76b3436002b31865967b757b73c85992636383b | fd736af3e79899b86dac89d4d925d5bd985944ad | refs/heads/master | 2023-08-15T01:20:05.110565 | 2023-08-14T00:25:58 | 2023-08-14T00:25:58 | 148,686,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | # https://leetcode.com/problems/smallest-index-with-equal-value/
class Solution(object):
def smallestEqual(self, nums):
for i, n in enumerate(nums):
if i % 10 == n:
return i
return -1
| [
"jdanray@users.noreply.github.com"
] | jdanray@users.noreply.github.com |
1a2655fce7a52f99d61f3d7ec9774ac9aaf13d41 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /070_oop/001_classes/_exercises/_templates/Python_OOP_Object_Oriented_Programming/Section 6/Encapsulation-Abstraction-Code/1 - Car - Public, Protected, Private.py | ac6327da0975cbcd8e5ce6f0771d3bc21e0fccb6 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 206 | py | # c_ Car
#
# ___ - ____ model year id_num engine_serial_num
# ____.? ?
# ____.? ?
# ____._? ?
# ____.__? ?
#
# my_car = ?("Escape", 2006, "44542", "201109048934242")
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
0123805e9d5a28bd98f12d4102161b84729e5a9b | 8802949d027efd1d31113e84adf8ffd28ec61ce8 | /parObj2.py | 59edfffd0d8a35d16b5fa5fe8cec539e87baafc4 | [] | no_license | AraiKensuke/EnDe | 46c006c12de0d498b6ec6e73e60c17384061c404 | 983268c82686ce18cc76c50837db9c0fc654f8e2 | refs/heads/master | 2020-04-15T15:50:23.494375 | 2017-09-17T01:48:17 | 2017-09-17T01:48:17 | 43,001,381 | 1 | 0 | null | 2017-07-17T21:20:46 | 2015-09-23T13:06:40 | Python | UTF-8 | Python | false | false | 945 | py | import numpy as np
import multiprocessing as mp
class Tester:
def __init__(self, tnum=-1):
self.num = tnum
self.num2 = 10*tnum
def modme(self, nn, val2=None):
self.num += nn
if val2 is not None:
print "Got non-None value for val2"
self.num2 = val2
#return self
return (nn+5)
def modhelp(test, name, *args, **kwargs):
callme = getattr(test, name)
callme(*args, **kwargs)#, kwargs)
return test
def modhelpSP(test, nn, name, **kwargs):
callme = getattr(test, name)
callme(nn, **kwargs)#, kwargs)
N = 2
p = mp.Pool(processes=N)
tts = _N.empty(N, dtype=object)
for nt in xrange(N):
tts[nt] = Tester(tnum=nt)
#modhelpSP(tts[nt], nt+5, "modme", val2=(nt*5))
results = _N.empty(N, dtype=object)
for nt in xrange(N):
kwds = {"val2" : (nt*5+1)}
results[nt] = p.apply_async(modhelp, args=(tts[nt], "modme", nt+5, ), kwds=kwds)
| [
"kensuke.y.arai@gmail.com"
] | kensuke.y.arai@gmail.com |
6a535746cb62942f89e980cc16df640f99038714 | 7f97814acd76ca96aee877fd70d401380f848fae | /6_training/count_nodes.py | 1b1f2165cfae52f23ecde1341a68c156a04aad7c | [] | no_license | tberhanu/all_trainings | 80cc4948868928af3da16cc3c5b8a9ab18377d08 | e4e83d7c71a72e64c6e55096a609cec9091b78fa | refs/heads/master | 2020-04-13T12:12:21.272316 | 2019-03-16T04:22:20 | 2019-03-16T04:22:20 | 163,195,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | """ 222. Count Complete Tree Nodes
Given a complete binary tree, count the number of nodes.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def countNodes(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
lefty = self.countNodes(root.left)
righty = self.countNodes(root.right)
return 1 + lefty + righty
| [
"tberhanu@berkeley.edu"
] | tberhanu@berkeley.edu |
8cd50ada5edeea3845d7371bc4bedcfd0a7d7c28 | 32fd04b72bc5a039c11b6bacd98726cdcaec6d2c | /reduce_herschel_spectra/generate_averaged_hifi_spectra.py | 430db14f37cdca8bf50f27be3cf45431a9d60f1d | [] | no_license | tomr-stargazer/reduce_herschel_IRAS16293_spectra | 31657f08d018f71b93b4fee41f7d619b0fe114cf | 9c27e573140cfba2234a545f87b73b75624f9959 | refs/heads/master | 2021-09-07T17:07:58.294477 | 2018-02-26T15:57:54 | 2018-02-26T15:57:54 | 93,404,942 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | from __future__ import division
import os
import shutil
from gunzip_make_hifi import convert_FITS_to_HIFI
from combine_and_average import average_polarizations
list_of_bands = ["1a", "1b", "2a", "2b", "3a", "3b", "4a", "4b", "5a", "6a", "6b", "7a"]
root_directory_of_data = os.path.expanduser("~/Documents/Data/Herschel_Science_Archive/IRAS16293/")
level_2_5_data = os.path.join(root_directory_of_data, "level_2_5_all_bands")
target_location = os.path.join(root_directory_of_data, "Partially_Reduced_Spectra")
for band in list_of_bands:
data_location = os.path.join(level_2_5_data, band, "level2_5/myDecon/")
data_location_horizontal = os.path.join(data_location, "myDecon_WBS-H")
data_location_vertical = os.path.join(data_location, "myDecon_WBS-V")
convert_FITS_to_HIFI(data_location_horizontal, band+"-horizontal.hifi")
convert_FITS_to_HIFI(data_location_vertical, band+"-vertical.hifi")
averaged_file_fullpath = average_polarizations(data_location, band, clobber=True)
shutil.copy2(averaged_file_fullpath, target_location)
| [
"t.rice90@gmail.com"
] | t.rice90@gmail.com |
85dfdf77bfafd920d41772a4e965dcd760afef59 | edcd74f8f65119bdbe737360c2ca33b4a6da160a | /python/problem-tree/insufficient_nodes_in_root_to_leaf_paths.py | 193b8f01a891a48172b800a9a35adf7b8173daa7 | [] | no_license | hyunjun/practice | 72e83de6a1d5e04ddcd16526f16110ea2dd00373 | 5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67 | refs/heads/master | 2023-08-31T07:00:37.320351 | 2023-08-17T07:29:24 | 2023-08-17T07:29:24 | 2,704,126 | 3 | 2 | null | 2022-12-14T20:25:07 | 2011-11-03T18:28:44 | Python | UTF-8 | Python | false | false | 2,595 | py | # https://leetcode.com/problems/insufficient-nodes-in-root-to-leaf-paths
from TreeNode import TreeNode
class Solution:
# runtime; 92ms, 89.61%
# memory; 15MB, 100.00%
def sufficientSubset(self, root: TreeNode, limit: int) -> TreeNode:
if root is None:
return root
def calc(prevSum, n):
if n.left is None and n.right is None:
if prevSum + n.val < limit:
return None
return n
if n.left:
n.left = calc(prevSum + n.val, n.left)
if n.right:
n.right = calc(prevSum + n.val, n.right)
if n.left is None and n.right is None:
return None
return n
return calc(0, root)
s = Solution()
'''
_______1_______ 1
/ \ / \
_2_ __3__ 2 3
/ \ / \ / \
4 -99 -99 _7_ 4 7
/ \ / \ / \ / \ / \ \
8 9 -99 -99 12 13 -99 14 8 9 14
limit 1
'''
root1 = TreeNode(1)
root1.left = TreeNode(2)
root1.right = TreeNode(3)
root1.left.left = TreeNode(4)
root1.left.right = TreeNode(-99)
root1.right.left = TreeNode(-99)
root1.right.right = TreeNode(7)
root1.left.left.left = TreeNode(8)
root1.left.left.right = TreeNode(9)
root1.left.right.left = TreeNode(-99)
root1.left.right.right = TreeNode(-99)
root1.right.left.left = TreeNode(12)
root1.right.left.right = TreeNode(13)
root1.right.right.left = TreeNode(-99)
root1.right.right.right = TreeNode(14)
print(s.sufficientSubset(root1, 1))
'''
_5_ _5_
/ \ / \
4 8 4 8
/ / \ / / \
11 17 4 11 17 4
/ \ / \ / /
7 1 5 3 7 5
limit 22
'''
root2 = TreeNode(5)
root2.left = TreeNode(4)
root2.right = TreeNode(8)
root2.left.left = TreeNode(11)
root2.right.left = TreeNode(17)
root2.right.right = TreeNode(4)
root2.left.left.left = TreeNode(7)
root2.left.left.right = TreeNode(1)
root2.right.right.left = TreeNode(5)
root2.right.right.right = TreeNode(3)
print(s.sufficientSubset(root2, 22))
'''
1 1
/ \ \
2 -3 -3
/ / /
-5 4 4
limit -1
'''
root3 = TreeNode(1)
root3.left = TreeNode(2)
root3.right = TreeNode(-3)
root3.left.left = TreeNode(-5)
root3.right.left = TreeNode(4)
print(s.sufficientSubset(root3, -1))
| [
"agapelover4u@yahoo.co.kr"
] | agapelover4u@yahoo.co.kr |
cdb1d7dcec9622f8be7364b4bd8e96befbf01c13 | 2937d60b7f5259b4899ba5af08146bd874529a67 | /Assignment 8 q8.py | 0cdfc6c123fbcc437bc575427a7453354fc5e2ef | [] | no_license | gourav47/Let-us-learn-python | 9a2302265cb6c47e74863359c79eef5a3078358a | b324f2487de65b2f073b54c8379c1b9e9aa36298 | refs/heads/master | 2021-06-27T03:33:27.483992 | 2021-01-07T12:26:16 | 2021-01-07T12:26:16 | 204,323,390 | 1 | 1 | null | 2020-07-19T14:25:12 | 2019-08-25T16:53:56 | Python | UTF-8 | Python | false | false | 371 | py | '''compare two tuples, whether they contain the same element in any order or not'''
t1=eval(input("Enter the first tuple: "))
t2=eval(input("Enter the second tuple: "))
if t1==t2:
print("Tuples are same and are in same order")
else:
print("t2 is in t1" if all(e in t1 for e in t2) else "t1 is in t2" if all(e in t2 for e in t1) else "Tuples are not same")
| [
"noreply@github.com"
] | gourav47.noreply@github.com |
c2cdecfaf775fb1dd84f66cbf854dd5bbd4cb548 | 7d8e040cb703e6f6e2d55b5dc64fc9124d85dde8 | /skl2onnx/tutorial/benchmark.py | bcab7e44325c2f7077ac8d948ee6f1d583832bda | [
"MIT"
] | permissive | Global-localhost/sklearn-onnx | fc44aa481a91482f187cfd2307df6061b77742af | a8267e7ba946d8b0596951060e5dca39fec47439 | refs/heads/master | 2023-03-23T00:19:31.474251 | 2021-03-03T19:17:12 | 2021-03-03T19:17:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | """
Tools to help benchmarking.
"""
from timeit import Timer
import numpy
def measure_time(stmt, context, repeat=10, number=50, div_by_number=False):
"""
Measures a statement and returns the results as a dictionary.
:param stmt: string
:param context: variable to know in a dictionary
:param repeat: average over *repeat* experiment
:param number: number of executions in one row
:param div_by_number: divide by the number of executions
:return: dictionary
.. runpython::
:showcode:
from skl2onnx.tutorial import measure_time
from math import cos
res = measure_time("cos(x)", context=dict(cos=cos, x=5.))
print(res)
See `Timer.repeat <https://docs.python.org/3/library/
timeit.html?timeit.Timer.repeat>`_
for a better understanding of parameter *repeat* and *number*.
The function returns a duration corresponding to
*number* times the execution of the main statement.
"""
tim = Timer(stmt, globals=context)
res = numpy.array(tim.repeat(repeat=repeat, number=number))
if div_by_number:
res /= number
mean = numpy.mean(res)
dev = numpy.mean(res ** 2)
dev = (dev - mean**2) ** 0.5
mes = dict(average=mean, deviation=dev, min_exec=numpy.min(res),
max_exec=numpy.max(res), repeat=repeat, number=number)
return mes
| [
"noreply@github.com"
] | Global-localhost.noreply@github.com |
52baf0f97da8c5a0336484adf40ba0898bdf3efc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/34/usersdata/134/13382/submittedfiles/moedas.py | 0ccfc61b251ea5f113ad1ed38e1dc047d75fc972 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | # -*- coding: utf-8 -*-
from __future__ import division
a = int(input('Digite o valor de a:'))
b = int(input('Digite o valor de b:'))
c = int(input('Digite o valor de c:'))
R1=c//a
resto1=c%a
if resto1!=0:
R2=resto1//b
resto2=resto1%b
if resto2==0:
print ('%d' %R1)
print ('%d' %R2)
elif resto2!=0:
R3=c//b
resto3=c%b
if resto3==0:
print ('0')
print ('%d' %R3)
if resto3!=0:
R4= resto3//a
resto4=resto3%a
if resto4==0:
print ('%d' %R4)
print ('%d' %R3)
else:
print ('N')
elif resto1==0:
print ('%d' %R1)
print ('0')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c52253f69c5e40b0e961a6242e56af18848d6134 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_jackhammers.py | 0325943aafb2ceaa43fba5d39c112826adf81ec8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
#calss header
class _JACKHAMMERS():
def __init__(self,):
self.name = "JACKHAMMERS"
self.definitions = jackhammer
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['jackhammer']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
70619d037c1670b6c631a93f650fc963cae1ae02 | b8e239b6d75fb88865ade7e355144fae49b4186f | /google-cloud-sdk/lib/googlecloudsdk/command_lib/eventarc/flags.py | 90db15a2b071bbbf007ea6320a995cd4f3159ab1 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | pombredanne/gcloud_cli | d6a8b3ed4a28751b3264c7fefb7b43645c024478 | 7ca81d3a3689f41ce51c3c70805e6203f5b8944f | refs/heads/master | 2022-12-07T19:09:35.204462 | 2020-09-02T04:06:46 | 2020-09-02T04:06:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,485 | py | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for Eventarc commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope.concepts import concepts
from googlecloudsdk.calliope.concepts import deps
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.core import properties
# IAM API version used when building service account resource specs below.
_IAM_API_VERSION = 'v1'
def LocationAttributeConfig():
  """Builds an AttributeConfig for the location resource."""
  # Fall back to the [eventarc/location] property when no location is given.
  location_fallthroughs = [
      deps.PropertyFallthrough(properties.FromString('eventarc/location'))
  ]
  help_text = ('The location for the Eventarc resource. Alternatively, set '
               'the [eventarc/location] property.')
  return concepts.ResourceParameterAttributeConfig(
      name='location',
      fallthroughs=location_fallthroughs,
      help_text=help_text)
def TriggerAttributeConfig():
  """Builds an AttributeConfig for the trigger resource."""
  # The trigger attribute needs no fallthroughs or extra help text.
  config = concepts.ResourceParameterAttributeConfig(name='trigger')
  return config
def ServiceAccountAttributeConfig():
  """Builds an AttributeConfig for the service account resource."""
  # Only the attribute name is required; everything else uses defaults.
  config = concepts.ResourceParameterAttributeConfig(name='service-account')
  return config
def AddLocationResourceArg(parser, group_help_text, required=False):
  """Adds a resource argument for an Eventarc location."""
  spec = concepts.ResourceSpec(
      'eventarc.projects.locations',
      resource_name='location',
      locationsId=LocationAttributeConfig(),
      projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG)
  # Register the --location flag directly on the given parser.
  concept_parsers.ConceptParser.ForResource(
      '--location', spec, group_help_text,
      required=required).AddToParser(parser)
def AddTriggerResourceArg(parser, group_help_text, required=False):
  """Adds a resource argument for an Eventarc trigger."""
  spec = concepts.ResourceSpec(
      'eventarc.projects.locations.triggers',
      resource_name='trigger',
      triggersId=TriggerAttributeConfig(),
      locationsId=LocationAttributeConfig(),
      projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG)
  # The trigger is positional (no leading dashes), unlike the other flags.
  concept_parsers.ConceptParser.ForResource(
      'trigger', spec, group_help_text,
      required=required).AddToParser(parser)
def AddServiceAccountResourceArg(parser, required=False):
  """Adds a resource argument for an IAM service account."""
  help_text = (
      'The IAM service account associated with the trigger, specified with an '
      'email address or a uniqueId. If not specified, the default compute '
      'service account will be used. Unless a full resource name is provided, '
      'the service account is assumed to be in the same project as the '
      'trigger.')
  spec = concepts.ResourceSpec(
      'iam.projects.serviceAccounts',
      resource_name='service account',
      api_version=_IAM_API_VERSION,
      serviceAccountsId=ServiceAccountAttributeConfig(),
      projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG)
  concept_parsers.ConceptParser.ForResource(
      '--service-account', spec, help_text,
      required=required).AddToParser(parser)
def AddMatchingCriteriaArg(parser, required=False):
  """Adds an argument for the trigger's matching criteria."""
  help_text = (
      'The criteria by which events are filtered for the trigger, '
      'specified as a comma-separated list of CloudEvents attribute names and '
      'values. This flag can also be repeated to add more criteria to the '
      'list. Only events that match with this criteria will be sent to the '
      'destination. The criteria must include the `type` attribute, as well as '
      'any other attributes that are expected for the chosen type.')
  # UpdateAction merges repeated flag occurrences; ArgDict parses KEY=VALUE
  # pairs from the comma-separated list.
  parser.add_argument(
      '--matching-criteria',
      action=arg_parsers.UpdateAction,
      type=arg_parsers.ArgDict(),
      required=required,
      metavar='ATTRIBUTE=VALUE',
      help=help_text)
def AddDestinationRunServiceArg(parser, required=False):
  """Adds an argument for the trigger's destination Cloud Run service."""
  help_text = (
      'The name of the Cloud Run fully-managed service that receives the '
      'events for the trigger. The service must be in the same region as the '
      'trigger unless the trigger\'s location is `global`. The service must be '
      'in the same project as the trigger.')
  parser.add_argument(
      '--destination-run-service', required=required, help=help_text)
def AddDestinationRunPathArg(parser, required=False):
  """Adds an argument for the trigger's destination path on the service."""
  help_text = (
      'The relative path on the destination Cloud Run service to which '
      'the events for the trigger should be sent. Examples: "/route", "route", '
      '"route/subroute".')
  parser.add_argument(
      '--destination-run-path', required=required, help=help_text)
def AddDestinationRunRegionArg(parser, required=False):
  """Adds an argument for the trigger's destination service's region."""
  help_text = (
      'The region in which the destination Cloud Run service can be '
      'found. If not specified, it is assumed that the service is in the same '
      'region as the trigger.')
  parser.add_argument(
      '--destination-run-region', required=required, help=help_text)
def AddClearServiceAccountArg(parser):
  """Adds an argument for clearing the trigger's service account."""
  help_text = (
      'Clear the IAM service account associated with the trigger and use '
      'the default compute service account instead.')
  # Boolean flag: present -> True, absent -> False.
  parser.add_argument(
      '--clear-service-account', action='store_true', help=help_text)
def AddClearDestinationRunPathArg(parser):
  """Adds an argument for clearing the trigger's destination path."""
  help_text = (
      'Clear the relative path on the destination Cloud Run service to '
      'which the events for the trigger should be sent.')
  # Boolean flag: present -> True, absent -> False.
  parser.add_argument(
      '--clear-destination-run-path', action='store_true', help=help_text)
| [
"actions@github.com"
] | actions@github.com |
c65489868b5614f72efd0a6d89aa72cd8eb91358 | 4e15720afdf1d90540acc38fbbbe26262a3b0842 | /favorite_team/urls.py | 7504e0307f32e294106b61614ffe387a2c6db10d | [] | no_license | rallen0150/bootstrap | 296eca9a2463fe7926c36c6263001197feae16df | f41e5d93af642845b7e5da284ee86b33a127f097 | refs/heads/master | 2021-01-11T00:35:43.341177 | 2016-10-10T23:36:56 | 2016-10-10T23:36:56 | 70,510,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | """favorite_team URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from app.views import index_view, about_view, record_view, player_view
# URL routes for the favorite_team project; each page is rendered by a
# function-based view in app.views.
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    url(r'^$', index_view, name="index_view"),
    url(r'^about/$', about_view, name="about_view"),
    url(r'^record/$', record_view, name="record_view"),
    url(r'^player/$', player_view, name="player_view")
]
| [
"rallen0150@gmail.com"
] | rallen0150@gmail.com |
4b0bb907945280b9d03a46f3176f94ee15c2bf9d | cbda89443b351bb2047180dad4e300c13dc3df7f | /Crystals/Morpurgo_all_sp_Reorgs_qsplit_noscreen_new/Jobs/C60/C60_cation_neut_inner1_outer0/C60_cation_neut_inner1_outer0.py | ccebde61424c1f946c6e8be6a33acbe7cbdc10b0 | [] | no_license | sheridanfew/pythonpolarisation | 080f52979f98d26360a46412a10c8e3f51ee4549 | 178e2684e9a239a8e60af5f7b1eb414ac5f31e92 | refs/heads/master | 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null | UTF-8 | Python | false | false | 7,149 | py | import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
# Python 2 driver script: builds a C60 crystal, places a cation charge on the
# central molecule, computes induced dipoles / electrostatic energies, and
# then scans pairwise reorganisation energies over the lattice.
print strftime("%a, %d %b %Y %X +0000", gmtime())

# Net charge (in units of e) assigned for each electronic state.
qdict={"anion": -1.0, "neut": 0.0, "cation": 1.0}

name='C60_cation_neut_inner1_outer0'

#For crystals here, all cubic and centred at centre
insize=1
#number of TVs in each dir central mol is from edge of inner region
outsize=0
state='cation'

# Geometry files for the four molecules of the unit cell, reused for the
# central cell, the surrounding (inner) region and the outer region.
mols_cen=['sp_C60_mola_neut.xyz','sp_C60_molb_neut.xyz','sp_C60_molc_neut.xyz','sp_C60_mold_neut.xyz']
mols_sur=['sp_C60_mola_neut.xyz','sp_C60_molb_neut.xyz','sp_C60_molc_neut.xyz','sp_C60_mold_neut.xyz']
mols_outer=['sp_C60_mola_neut.xyz','sp_C60_molb_neut.xyz','sp_C60_molc_neut.xyz','sp_C60_mold_neut.xyz']
Natoms=60

#From cif:
'''
C60
_cell_length_a 14.052(5)
_cell_length_b 14.052(5)
_cell_length_c 14.052(5)
_cell_angle_alpha 90
_cell_angle_beta 90
_cell_angle_gamma 90
_cell_volume 2774.69
_cell_formula_units_Z 4
'''

#Get translation vectors:
# Cell lengths converted from Angstrom to Bohr (1 Bohr = 0.5291772109217 A).
a=14.0525/0.5291772109217
b=14.0525/0.5291772109217
c=14.0525/0.5291772109217
alpha=90*(pi/180)
beta=90*(pi/180)
gamma=90*(pi/180)
cif_unit_cell_volume=2774.69/(a*b*c*(0.5291772109217**3))
# Dimensionless triclinic volume factor (equals 1 for this cubic cell).
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))

#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
                                [0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
                                [0, 0, c*cell_volume/sin(gamma)]])

#carts to frac
matrix_to_fractional=matrix_to_cartesian.I

#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T

# Interaction cutoff passed to get_dipoles (same length units as TV).
cut=8.0

totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.

print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV

# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)

#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.

print 'state',state
print 'q: ', qdict[state]

# Assign the state's charge to every atom of the central molecule.
# NOTE(review): each atom gets the full qdict value; presumably the
# Polarizability code interprets _crg per-atom -- confirm in BasicElements.
for atom in prot_neut_cry()._mols[0][prot_neut_cry()._cenpos[0]][prot_neut_cry()._cenpos[1]][prot_neut_cry()._cenpos[2]]():
    atom()._crg=qdict[state]

prot_neut_cry().print_posns()

#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())

# No external field: polarisation is driven by the central charge alone.
E0 = np.matrix([0.,0.,0.])

print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
jm = JMatrix(jmtype='Stern',cutoff=0.)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
    tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot

# Energy components, converted from Hartree to eV (1 Ha = 27.211 eV).
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())

print 'Making .dat cross sections for gnuplot'

# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()

# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
    dstr=str(dd)
    f.write(dstr)
    f.write('\n')
f.flush()
f.close()

# print properties for charge in centrepos
# `time` here is just a timestamp string for the CSV rows below.
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()

# print header for reorgs
f = open('reorg_energies_%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\ta\tb\tc\tmolincell\tReorg(eV)')
f.flush()
f.close()

# REORGANISATION ENERGIES
#Note that this assumes a cube, and values for which
# NOTE(review): the loop variables a, b, c shadow the cell lengths defined
# above; safe only because TV has already been computed from them.
for dist in range(0,(length[0]/2)+1,1):
    print '\n\nDIST: ', dist, '\n'
    for a in range(prot_neut_cry()._cenpos[0]-dist,prot_neut_cry()._cenpos[0]+dist+1,1):
        for b in range(prot_neut_cry()._cenpos[1]-dist,prot_neut_cry()._cenpos[1]+dist+1,1):
            for c in range(prot_neut_cry()._cenpos[2]-dist,prot_neut_cry()._cenpos[2]+dist+1,1):
                print strftime("%a, %d %b %Y %X +0000", gmtime())
                print 'a,b,c',a,b,c
                for molincell in range(0,len(prot_neut_cry()._mols),1):
                    # Reorganisation energy between the central molecule and
                    # molecule `molincell` of cell (a, b, c); appended to CSV
                    # immediately so partial runs still leave usable data.
                    prot_neut_cry().calc_reorg_shareq(a1=prot_neut_cry()._cenpos[0],b1=prot_neut_cry()._cenpos[1],c1=prot_neut_cry()._cenpos[2],molincell1=0,a2=a,b2=b,c2=c,molincell2=molincell,jm=jm._m,oldUqd=Uqd)
                    print 'Reorg: ', prot_neut_cry()._reorgs_shareq[molincell][a][b][c]
                    f = open('reorg_energies_%s_properties.csv' % name, 'a')
                    f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,a,b,c,molincell,prot_neut_cry()._reorgs_shareq[molincell][a][b][c]))
                    f.flush()
                    f.close()
    # Redo this and overwrite after each set to ensure we have some even if not all reorgs complete
    prot_neut_cry().print_reorgs_shareq()

print 'Job Completed Successfully.'
| [
"sheridan.few@gmail.com"
] | sheridan.few@gmail.com |
1f5a76e3f6844a408820537f70abaf3f2edeccb9 | 5643f360a6f57e3d904bed3d63ada7d2eeda20b3 | /unsupervised_learning/PCA.py | a92a707375a6887db188d66628ffab5e4a28df3b | [] | no_license | Abhi551/Machine-learning | 6b0f0530c9b78fa8fdf4c0da5aff680e2f236bf5 | 6f134830d4a0b038698df183f71cd118a93e1844 | refs/heads/master | 2021-10-23T03:16:08.815210 | 2019-03-14T13:10:44 | 2019-03-14T13:10:44 | 125,890,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,887 | py | ## unsupervised model used for extracting important variable from large set of variables
## in a data set .
## It extracts low dimensinonal set of features from a high dimensinonal dataset
## to capture as much information as possible
## best when 3 or more features are present in dataset
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from adspy_shared_utilities import plot_labelled_scatter
# --- Part 1: PCA on the breast-cancer dataset -------------------------------
cancer_data = load_breast_cancer()

## returns data and target both using return_X_y
x_cancer , y_cancer = load_breast_cancer(return_X_y = True)

## performing preprocessing on the datasets
## so that each feature have zero mean and unit variance
scaler = StandardScaler()
x_fit = scaler.fit(x_cancer)
x_transform = x_fit.transform(x_cancer)
print (x_transform.shape)

## the final results will give the data which have zero mean and variance of data is unity
## specify the PCA object with 2 features to retain only
## and fitting the transformed data in PCA object
pca = PCA(n_components = 2).fit(x_transform)
print (pca)

## last step is to
## put the transformed data in the pca object to give the final transformed data
x_final = pca.transform(x_transform)
print (x_final.shape)

## using the same result on real world datasets
plot_labelled_scatter(x_final , y_cancer , ['malignant', 'benign'])

## creating a heatmap for each feature
## i.e. plotting the magnitude of each feature value for first 2 principal components
fig = plt.figure( figsize = (8,4) )
print (pca.components_.shape)
plt.imshow(pca.components_ , interpolation = 'none' , cmap = "plasma")
feature_names = list(cancer_data.feature_names)
plt.gca().set_xticks(np.arange(-.5 , len(feature_names)))
plt.gca().set_yticks(np.arange(.5 , 2 ))
plt.gca().set_xticklabels(feature_names , rotation = 90 , ha = "left" , fontsize = 12)
plt.gca().set_yticklabels(["First PC" , "Second PC"] , va = "bottom" , fontsize = 12)
plt.colorbar(orientation = "horizontal" , ticks = [pca.components_.min() , 0 ,
pca.components_.max()] , pad = .65)
plt.show()

# --- Part 2: PCA on the fruits dataset --------------------------------------
## on fruits dataset
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from adspy_shared_utilities import plot_labelled_scatter
df = pd.read_csv('fruit_data_with_colors.txt', delimiter ="\t")

## preprocessing of data
x_fruits = df[['mass','width','height', 'color_score']]
y_fruits = df[['fruit_label']]
print (x_fruits.head())
scaler = StandardScaler()
x_fruits = scaler.fit(x_fruits).transform(x_fruits)

## using PCA
# NOTE(review): the loop variable `i` is unused -- n_components is fixed at 2,
# so all three iterations produce the same plot. Possibly `n_components = i`
# was intended; confirm before changing.
for i in range(2,5):
    pca = PCA(n_components = 2).fit(x_fruits)
    x_pca = pca.transform(x_fruits)
    plot_labelled_scatter(x_pca , y_fruits , ["apple" , "mandarian" , "orange" , "lemon"])
| [
"abhichauhan551@gmail.com"
] | abhichauhan551@gmail.com |
62a8b9674aef0f3af6fd82b82dbf39558c49f35c | cc7d7f6128b81a959dffaf23627d7dfc95558209 | /ResNet50-2d/resnet.py | 667200d427262fecc78b6fa1025102ef5f07b55c | [] | no_license | abandonsea/M3D | 8fb7a9297789afab74dd3d0bb573583703932325 | 68afe5e79266caad9d9afa45fc9d754033b288d3 | refs/heads/master | 2022-11-23T07:51:34.680151 | 2020-07-22T00:59:33 | 2020-07-22T00:59:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,976 | py | import torch.nn as nn
import math, torch
import torch.utils.model_zoo as model_zoo
from torch.nn import init
class Bottleneck(nn.Module):
    """ResNet bottleneck unit: 1x1 reduce -> 3x3 -> 1x1 expand (x4 channels)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 convolution reducing the channel count to `planes`.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 convolution; any spatial downsampling happens here via `stride`.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 convolution expanding channels by the block's expansion factor.
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the shortcut when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity shortcut, projected when a downsample module is supplied.
        identity = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # Residual addition followed by the final activation.
        y = y + identity
        return self.relu(y)
class ResNet(nn.Module):
    """ResNet-50-style backbone with a 128-d embedding plus classifier head.

    When constructed with train=True, forward() returns class logits; with
    train=False it returns the raw 128-d embedding (no BN/ReLU/dropout).
    NOTE(review): avgpool uses a fixed (16, 8) kernel and layer4 keeps
    stride=1, which matches ~256x128 inputs (common in person re-ID) --
    confirm against the training pipeline.
    """

    def __init__(self, block, layers, num_classes=1000, train=True):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Controls which head forward() applies (see class docstring).
        self.istrain = train
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # stride=1 (instead of the usual 2) preserves spatial resolution here.
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
        self.avgpool = nn.AvgPool2d((16,8), stride=1)
        # Dimensionality of the embedding produced by the `feat` layer.
        self.num_features = 128
        self.feat = nn.Linear(512 * block.expansion, self.num_features)
        self.feat_bn = nn.BatchNorm1d(self.num_features)
        # NOTE: init.kaiming_normal/init.constant/init.normal are the
        # deprecated (pre-0.4 torch) spellings of kaiming_normal_/constant_/
        # normal_; left unchanged for compatibility with the old torch used.
        init.kaiming_normal(self.feat.weight, mode='fan_out')
        init.constant(self.feat.bias, 0)
        init.constant(self.feat_bn.weight, 1)
        init.constant(self.feat_bn.bias, 0)
        self.drop = nn.Dropout(0.5)
        self.classifier = nn.Linear(self.num_features, num_classes)
        init.normal(self.classifier.weight, std=0.001)
        init.constant(self.classifier.bias, 0)
        # He initialisation for all convolutions; BN scale 1, shift 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stacks `blocks` bottleneck units; adds a projection shortcut when
        the first unit changes the spatial size or channel count."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 conv + BN so the shortcut matches the main path's shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten to (batch, 512 * expansion) and project to the embedding.
        x = x.view(x.size(0), -1)
        x = self.feat(x)
        if self.istrain:
            # Training head: BN -> ReLU -> dropout -> class logits.
            x = self.feat_bn(x)
            x = self.relu(x)
            x = self.drop(x)
            x = self.classifier(x)
        return x
def resnet50(pretrained='True', num_classes=1000, train=True):
    """Builds the ResNet-50 model and copies pretrained weights into it.

    NOTE(review): despite its name and default value, `pretrained` is passed
    straight to torch.load(), so it must be a checkpoint *path* -- confirm
    what callers actually pass. (Python 2 file: uses print statements.)
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes, train)
    #if pretrained:
    # model.load_state_dict('resnet50-19c8e357.pth')
    weight = torch.load(pretrained)
    static = model.state_dict()
    # Copy tensors name-by-name so checkpoint keys missing from this model
    # (e.g. a different classifier head) are skipped instead of erroring.
    for name, param in weight.items():
        if name not in static:
            print 'not load weight ', name
            continue
        if isinstance(param, nn.Parameter):
            print 'load weight ', name, type(param)
            # Unwrap Parameter to its underlying tensor before copying.
            param = param.data
        static[name].copy_(param)
    #model.load_state_dict(weight)
    return model
| [
"noreply@github.com"
] | abandonsea.noreply@github.com |
cbe42e6c08a217ab9d3f9925b59403483b0cd28e | 929fc8dd47b91c963c8c2f81d88e3d995a9dfc7c | /src/data_structure/hash_table/set.py | 7b7f0026e90534a21d8a0dfa4479732d254fb1b3 | [] | no_license | 1325052669/leetcode | fe7571a9201f4ef54089c2e078810dad11205b14 | dca40686c6a280bd394feb8e6e78d40eecf854b9 | refs/heads/master | 2023-04-01T17:53:30.605822 | 2021-04-10T15:17:45 | 2021-04-10T15:17:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | from typing import List
# https://leetcode.com/problems/happy-number/
class Solution202:
    def isHappy(self, n: int) -> bool:
        """Return True if repeatedly summing squared digits of n reaches 1.

        A number that is not "happy" eventually revisits a value; the `seen`
        set detects that cycle and stops the loop.
        """
        seen = set()
        while n not in seen and n != 1:
            seen.add(n)
            n = sum(int(ch) ** 2 for ch in str(n))
        return n == 1
# https://leetcode.com/problems/longest-consecutive-sequence/
class Solution128:
    def longestConsecutive(self, nums: List[int]) -> int:
        """Return the length of the longest run of consecutive integers.

        O(n): each run is walked exactly once, starting only from values
        that have no predecessor in the input.
        """
        values = set(nums)
        best = 0
        for start in values:
            # Only begin counting at the smallest element of a run.
            if start - 1 in values:
                continue
            end = start
            while end in values:
                end += 1
            best = max(best, end - start)
        return best
# https://leetcode.com/problems/valid-sudoku/
class Solution36:
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Check that no digit repeats in any row, column, or 3x3 box.

        Empty cells are marked '.' and are ignored; the board is always 9x9.
        """
        seen_rows = [set() for _ in range(9)]
        seen_cols = [set() for _ in range(9)]
        seen_boxes = [set() for _ in range(9)]
        for r in range(9):
            for col in range(9):
                cell = board[r][col]
                if cell == '.':
                    continue
                digit = int(cell)
                # Boxes are numbered 0-8, left-to-right then top-to-bottom.
                box = (r // 3) * 3 + col // 3
                if (digit in seen_rows[r] or digit in seen_cols[col]
                        or digit in seen_boxes[box]):
                    return False
                seen_rows[r].add(digit)
                seen_cols[col].add(digit)
                seen_boxes[box].add(digit)
        return True
"js7995@nyu.edu"
] | js7995@nyu.edu |
88b52c6201b65b5762c9b91a6607157af7bc64bd | 548c18a693e4dd52765dcef0551e928a679aced7 | /practice prgms/prime numbers within an interval-simple program.py | e6d95912529a31cb3a0eed2030c83748d6a32837 | [] | no_license | iamsureshtumu/py-prgms | fd8517cd9f98b8b03bad358ac14f7abe58783428 | 56a619130d588356f9754d85339b6bdc3f645f5a | refs/heads/main | 2023-02-12T03:22:46.164020 | 2021-01-07T04:12:12 | 2021-01-07T04:12:12 | 327,499,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # Python program to display all the prime numbers within an interval
start = 50
end = 100
#lower = int(input("Enter lower range: "))
#upper = int(input("Enter upper range: "))
print("Prime numbers between",start,"and",end,"are:")
for num in range(start,end + 1):
# prime numbers are greater than 1
if num > 1:
for i in range(2,num):
if (num % i) == 0:
break
else:
print(num)
| [
"sureshtumu3691@gmail.com"
] | sureshtumu3691@gmail.com |
b605952411c7c518079b629f18a9567374f734d1 | 7f98e3add3d755d81efa5becdf795532f886b119 | /datascraper/2-cleanDataset.py | 0881466924d6aaf19772fe0bf2947f3902cd42e7 | [] | no_license | fgolemo/steamGraph | 4e67d08bb111363def7e26c42ad1201a90ee9e9d | d4bd8e25d345ada6461fe94846ff303367313e66 | refs/heads/master | 2020-12-25T14:33:28.418661 | 2017-06-29T21:38:48 | 2017-06-29T21:38:48 | 67,958,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,124 | py | import json
from time import strftime
from tqdm import tqdm
# Python 2 script: builds a vis.js-style graph (nodes + edges) from the
# scraped Steam data. Keeps the top `gamesToGet[preset]` games by player
# count, dropping movies, software and low-population titles, then keeps
# only edges whose both endpoints survived the filtering.
nodes = []
edges = []
# Steam ids of the games kept so far (used for de-duplication and edge pruning).
sids = []
# Union of all tags seen; only used by the commented-out debug print below.
tags = set()
movies = 0
software = 0
total = 0
lowRating = 0
preset = "1k"
gamesToGet = {"1k": 1000, "3k": 3000}

with open('../public/data/steamGraph6k3-170629.json') as data_file:
    data = json.load(data_file)

# Most-played games first, so the `count` cutoff keeps the top titles.
data = sorted(data, key=lambda k: k['players'], reverse=True)
count = 0
for item in tqdm(data):
    total += 1
    tagsTmp = [t.encode('ascii', 'ignore') for t in item["tags"]]
    tags = set(tagsTmp + list(tags))
    # Skip non-game entries: movies/documentaries ...
    if "Movie" in tagsTmp or "Documentary" in tagsTmp or item["name"] == "Kung Fury":
        movies+=1
        continue
    # ... and software/tooling products.
    if "Software" in tagsTmp or "Utilities" in tagsTmp \
            or "Game Development" in tagsTmp or "Video Production" in tagsTmp \
            or "Design & Illustration" in tagsTmp or item["name"] == "Tilt Brush":
        software+=1
        continue
    if item["players"] < 100: # this is for the 3K graph
        lowRating +=1
        continue
    # Stop once the desired number of games has been collected.
    if count == gamesToGet[preset]:
        break
    count += 1
    rating = item["rating"].encode('ascii', 'ignore')
    # -1 marks a missing rating.
    if rating != "":
        rating = int(rating)
    else:
        rating = -1
    sid = item["id"].encode('ascii', 'ignore')
    try:
        sid = int(sid)
    except ValueError:
        # Non-numeric id: fall back to the trailing path component of the
        # store link, which is the numeric app id.
        urlParts = item['link'].split('/')
        sid = int(urlParts[-1].encode('ascii', 'ignore'))
    # Skip duplicate entries (same Steam id seen before).
    if sid in sids:
        print item
        continue
    # if item['rank'] > 1000:
    # continue
    sids.append(sid)
    itemClean = {
        'players': item['players'],
        'tags': tagsTmp,
        'rating': rating,
        'label': item['name'],#.encode('ascii', 'ignore'),
        # 'rank': item["rank"],
        'id': sid,
        'link': item["link"].encode('ascii', 'ignore'),
        'value':0
    }
    # print itemClean
    nodes.append(itemClean)
    # One edge per "related" game; skip an edge if the reversed pair already
    # exists, so each relationship is stored once.
    for edge in [int(e.encode('ascii', 'ignore')) for e in item['related']]:
        edgeClean = {
            'id': '{}-{}'.format(sid, edge),
            'from': sid,
            'to': edge,
            'value': 0
        }
        edgeExists = False
        for otherEdge in edges:
            if otherEdge['to'] == sid and otherEdge['from'] == edge:
                edgeExists = True
                break
        if not edgeExists:
            edges.append(edgeClean)
        #{id: '1-3', from: 1, to: 3, value: 0}

# Keep only edges whose both endpoints are kept nodes.
edgesClean = []
for e in edges:
    if e['to'] in sids and e['from'] in sids:
        edgesClean.append(e)

# Emit the graph as a dated JSON file for the front-end.
with open('../public/data/steamNet'+preset+"-"+strftime("%y%m%d")+'.json', 'w') as f:
    json.dump({'nodes': nodes, 'edges': edgesClean}, f)

#
# for t in tags:
#     print t+""
# Summary counts of what was processed / filtered out.
print "\n"
print total
print lowRating
print movies
print software
| [
"fgolemo@gmail.com"
] | fgolemo@gmail.com |
bc85cc771df7166db948934998075f139f7db7fc | 0228b665c61661b634f10afce2f76f2777fa29c2 | /live_examples/create_steam.py | c3c4414aa46621c367578858ab18ba828039c2f8 | [
"MIT"
] | permissive | bernieyangmh/pili-sdk-python | 18c9e99f5dac194228e9d7a40aee556e1db05356 | aeef24ad9629bb2247aa89dd7bcc3b8fb0d6a58c | refs/heads/master | 2021-09-11T09:47:28.859714 | 2018-04-06T13:04:42 | 2018-04-06T13:04:42 | 112,150,168 | 0 | 0 | null | 2017-11-27T05:10:23 | 2017-11-27T05:10:23 | null | UTF-8 | Python | false | false | 482 | py | # -*- coding: utf-8 -*-
"""
https://developer.qiniu.com/pili/api/2515/create-a-flow
创建流
"""
from pili import Mac, Hub
# 替换成自己 Qiniu 账号的 AccessKey
access_key = "..."
# 替换成自己 Qiniu 账号的 SecretKey
secret_key = "..."
hub_name = "..."
stream_name = "..."
mac = Mac(access_key, secret_key)
hub = Hub(mac, hub_name)
resp = hub.create(key=stream_name)
print(resp.status_code)
print(resp.headers)
print(resp.text)
print(hub.get(stream_name))
| [
"berniey@163.com"
] | berniey@163.com |
2b204f0044e3ad68a5f22d8b9018bb35e8deba5b | a5bbf6ece66a39f92706c807874870cc048391d9 | /menus/migrations/0001_initial.py | e56c6c147d99f7baa19af7351230107558d4bc78 | [] | no_license | IsaacMorzy/wagtailblog | f96e921c1d07522fe2519f33daa5b19c3facbadb | ef372b85daed423431a4283fa8b5859512b97979 | refs/heads/master | 2022-12-15T02:22:09.366893 | 2020-05-13T10:44:34 | 2020-05-13T10:44:34 | 225,391,854 | 1 | 0 | null | 2022-12-08T03:17:39 | 2019-12-02T14:19:12 | CSS | UTF-8 | Python | false | false | 1,841 | py | # Generated by Django 2.2.8 on 2019-12-10 12:42
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import modelcluster.fields
class Migration(migrations.Migration):
    """Initial (auto-generated) migration for the menus app.

    Creates the Menu table and the MenuItem table; each MenuItem belongs to
    a Menu via a ParentalKey and may link to either a raw URL or a Wagtail
    page.
    """

    initial = True

    dependencies = [
        ('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
    ]

    operations = [
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                # Slug is generated automatically from the title.
                ('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='title')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MenuItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Items are ordered by this field (see `ordering` below).
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_title', models.CharField(blank=True, max_length=50, null=True)),
                ('link_url', models.CharField(blank=True, max_length=500)),
                ('open_in_new_tab', models.BooleanField(blank=True, default=False)),
                # Optional internal link to a Wagtail page (vs. the raw URL above).
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='menu_items', to='menus.Menu')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
    ]
| [
"musyokaisaac98@gmail.com"
] | musyokaisaac98@gmail.com |
a4d6e4546903e3e9be31a70c20e61a8005a35805 | e0b607de0d1e91492b80369c5e8a6313372f9d29 | /app/views.py | 081983a94db7ca786b9542b6c0e4f8ec3c5089f1 | [] | no_license | surajkumarbhagat71/mathcalculation | 63a13473819657fa86136ce4593809f4129aa1f9 | 300850a574c60894a9bef57868816363f721775d | refs/heads/master | 2023-03-01T00:39:18.017426 | 2021-02-11T12:44:48 | 2021-02-11T12:44:48 | 338,026,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | from django.shortcuts import render,redirect
from django.views.generic import View
from django.db.models import Q
from .forms import *
from .models import *
# Create your views here.
class Signup(View):
    """Render and process the user registration form."""

    def get(self, request):
        # Show an empty registration form.
        form = UserForm()
        return render(request, 'signup.html', {"form": form})

    def post(self, request):
        form = UserForm(request.POST or None)
        if form.is_valid():
            form.save()
            return redirect('login')
        # Bug fix: the original returned None (not a valid HTTP response)
        # when validation failed; re-render the form so errors are shown.
        return render(request, 'signup.html', {"form": form})
class LoginView(View):
    """Render the login form and authenticate users by email/password."""

    def get(self, request):
        return render(request, 'login.html')

    def post(self, request, *args, **kwargs):
        # View.dispatch only routes POST requests here, so the original
        # `if request.method == 'POST'` guard was redundant (and left a
        # None-return path); it has been removed.
        username = request.POST.get('email')
        password = request.POST.get('password')
        # SECURITY: passwords are stored and compared in plain text here.
        # Django's auth framework (hashed passwords) should be used instead.
        cond = Q(email=username) & Q(password=password)
        check = User.objects.filter(cond).count()
        if check == 1:
            # Session flag read by other views (e.g. Calculation) as a
            # lightweight "logged in" marker.
            request.session['login'] = username
            return redirect('cal')
        # Bug fix: always return a response; failed logins go back to the
        # login page.
        return redirect('login')
def Sum(x, n):
    """Return the partial sum 1/x + 1/x**2 + ... + 1/x**n.

    Bug fix: the original recursive implementation hit Python's recursion
    limit for large n (RecursionError near n ~ 1000) and recursed without
    terminating for n < 1. This iterative version sums the same terms in
    the same left-to-right order (so results are bit-identical for n >= 1)
    and returns 0 for n <= 0 (the empty sum).

    Raises ZeroDivisionError if x == 0 and n >= 1.
    """
    total = 0
    for power in range(1, n + 1):
        total += 1 / (x ** power)
    return total
class Calculation(View):
    """Collect x and n from a logged-in user and display Sum(x, n)."""

    def get(self, request, *args, **kwargs):
        # Only users with the session flag set by LoginView may proceed.
        if not request.session.has_key('login'):
            return redirect('login')
        return render(request, 'getdata.html')

    def post(self, request, *args, **kwargs):
        if not request.session.has_key('login'):
            return redirect('login')
        # Form fields arrive as strings; convert before computing.
        x = int(request.POST.get('x'))
        n = int(request.POST.get('n'))
        return render(request, 'result.html', {"result": Sum(x, n)})
| [
"surajkumarbhgat71@gmail.com"
] | surajkumarbhgat71@gmail.com |
fbbffd250cfe33d45e332eaa7c597c0cc338972e | ead82159a724b351e1c82d31e133f284db4d5d32 | /mymusic/models.py | 994b088f8ec81c252fb6b6b39ce9b64f73f7793f | [] | no_license | momentum-morehouse/django-music-genolajohnson | ff9d004aa556d5907be995f5257b57b312c10bc5 | 81beca64eed41fa454904fd4c3b44ae0092639b4 | refs/heads/master | 2022-11-29T15:37:14.711971 | 2020-07-17T00:49:43 | 2020-07-17T00:49:43 | 279,331,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from django.db import models
# Create your models here.
# class User(AbstractUser):
# pass
class Album(models.Model):
    """A music album and the artist who released it."""
    # All text fields are optional (null/blank); only the release date
    # is required.
    artist_name = models.CharField(max_length=255, null=True, blank=True)
    title = models.CharField(max_length=255,null=True, blank=True)
    released = models.DateField()
    # External URL of the cover image, if any.
    img_url = models.TextField(null= True, blank= True)
    def __str__(self):
        # Human-readable label used by the admin and shell.
        return f"{self.title} by {self.artist_name}"
| [
"replituser@example.com"
] | replituser@example.com |
8fd2b1e1def0f43706a694e1453f6cb64f82ea8d | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/f383826d76e7d7723b9e5eaee92778f5c7760d5d-<destination_to_network>-bug.py | 24707890dd6eefddbedc12c263c27707b5f7d95b | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | def destination_to_network(self):
destination = self._values['destination']
if destination.startswith('default%'):
destination = '0.0.0.0%{0}/0'.format(destination.split('%')[1])
elif destination.startswith('default-inet6%'):
destination = '::%{0}/::'.format(destination.split('%')[1])
elif destination.startswith('default-inet6'):
destination = '::/::'
elif destination.startswith('default'):
destination = '0.0.0.0/0'
return destination | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
e86ffa15bdcd0373bf0c87c3468c1a69205de307 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/pki/csyncpolicy.py | e2f2771137084c404a13dafc1485c1815498a82a | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,431 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class CsyncPolicy(Mo):
    """
    Used to control csync timeout and enable/disable.
    """
    # NOTE: machine-generated model ("do not modify!" per the file
    # header).  Only comments are added here; regenerate rather than
    # hand-edit the metadata below.
    meta = ClassMeta("cobra.model.pki.CsyncPolicy")
    meta.moClassName = "pkiCsyncPolicy"
    meta.rnFormat = "csyncpolicy"
    meta.category = MoCategory.REGULAR
    meta.label = "File Synchronization Policy"
    meta.writeAccessMask = 0x3
    meta.readAccessMask = 0x3
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = False
    meta.isContextRoot = False
    meta.childClasses.add("cobra.model.tag.Tag")
    meta.childClasses.add("cobra.model.pki.CsyncElement")
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
    meta.childClasses.add("cobra.model.tag.Annotation")
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.pki.CsyncElement", "csyncelem-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.parentClasses.add("cobra.model.pki.Ep")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.superClasses.add("cobra.model.pki.Definition")
    meta.rnPrefixes = [
        ('csyncpolicy', False),
    ]
    # Property metadata: one PropMeta per managed-object attribute.
    prop = PropMeta("str", "annotation", "annotation", 37511, PropCategory.REGULAR)
    prop.label = "Annotation. Suggested format orchestrator:value"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("annotation", prop)
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "extMngdBy", "extMngdBy", 39650, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "undefined"
    prop._addConstant("msc", "msc", 1)
    prop._addConstant("undefined", "undefined", 0)
    meta.props.add("extMngdBy", prop)
    # csync polling interval, bounded to 30..600 (seconds per label usage
    # elsewhere in the model -- confirm units against the APIC docs).
    prop = PropMeta("str", "interval", "interval", 1212, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(30, 600)]
    prop.defaultValue = 30
    prop.defaultValueStr = "30"
    meta.props.add("interval", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "name", "name", 1221, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)
    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)
    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    # enable/disable switch for the csync policy (default: enabled).
    prop = PropMeta("str", "state", "state", 1211, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "enabled"
    prop._addConstant("disabled", "disabled", 0)
    prop._addConstant("enabled", "enabled", 1)
    meta.props.add("state", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("uid", prop)
    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Fabric"
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        """Create the MO under *parentMoOrDn*; csyncpolicy has no naming
        properties, so namingVals is empty."""
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
b4ac39cb2ae3416400dad040fd16a6489b3b62d9 | e7cd87117f195d7e6d7e45ade1d07384a3f42303 | /tests/test_util.py | 35b141c44c722dd3c256e9ba10ff6df2bda88c08 | [
"MIT"
] | permissive | zaabjuda/prometheus_async | e80d1921b16ab46a3d7781d6e29d2734c58a6c2a | 6e139f7ed18157aea015ac6b0fe52860446d5c2f | refs/heads/master | 2021-01-16T17:39:26.903205 | 2016-02-17T10:52:34 | 2016-02-17T10:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | from __future__ import absolute_import, division, print_function
try:
import asyncio
except ImportError:
asyncio = None
import time
import pytest
import six
from prometheus_async import _util
py2_only = pytest.mark.skipif(six.PY3, reason="Python 2-only test.")
py3_only = pytest.mark.skipif(six.PY2, reason="Python 3-only test.")
class TestMkTime(object):
    """Checks that _util picked the right monotonic clock per Python version."""

    @py2_only
    def test_py2(self):
        """
        Use monotonic.time on Python 2
        """
        import monotonic
        expected = monotonic.time
        assert _util.get_time is expected
        assert _util.mk_get_time() is expected

    @py3_only
    def test_py3(self):
        """
        Use time.perf_counter on Python 3
        """
        expected = time.perf_counter
        assert _util.get_time is expected
        assert _util.mk_get_time() is expected
| [
"hs@ox.cx"
] | hs@ox.cx |
37894e2994e54b788169167d818e84ef23dd93b4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03546/s628097135.py | 5e264a789848c60e87b361176cc71b0189cc755d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | import heapq
INF = 10**10
def dijkstra(s, G, inf=10**10):
    """Single-source shortest distances on a weighted digraph.

    Args:
        s: index of the source vertex.
        G: adjacency list; G[v] is a list of (to, cost) edges.
        inf: value reported for unreachable vertices.  Defaults to
            10**10, mirroring the module-level INF sentinel, so existing
            two-argument callers behave exactly as before.

    Returns:
        list d with d[v] = shortest distance from s to v.
    """
    d = [inf] * len(G)
    d[s] = 0
    # Min-heap of (tentative distance, vertex).
    q = [(0, s)]
    while q:
        dist_v, v = heapq.heappop(q)
        if d[v] < dist_v:
            # Stale heap entry: a shorter path to v was already settled.
            continue
        for to, cost in G[v]:
            nd = dist_v + cost
            if nd < d[to]:
                d[to] = nd
                heapq.heappush(q, (nd, to))
    return d
# Read the grid size, the 10x10 digit-rewrite cost matrix, then the grid,
# and print the minimum total cost of turning every non-(-1) digit into 1.
H, W = map(int, input().split())
G = [[] for _ in range(10)]
for i in range(10):
    adj = list(map(int, input().split()))
    for j, cost in enumerate(adj):
        # We need distances TO digit 1, so build the reversed graph and
        # run Dijkstra from node 1 instead.
        G[j].append((i, cost))
shortest_d = dijkstra(1, G)
ans = 0
for _ in range(H):
    for x in list(map(int, input().split())):
        if x == -1:
            # -1 marks an empty cell; nothing to rewrite.
            continue
        ans += shortest_d[x]
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6d10531aee49e9767663b286b0dedea028c51fe3 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/initial_4445.py | 10e2b1dd0f52ad72a98232ad27da4b44cc449e2d | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,328 | py | import _surface
import chimera
# Chimera may be running without the command module; ignore if absent.
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
# Different VolumePath versions expose marker-set creation differently;
# fall back to creating sets through the volume path dialog when the
# Marker_Set class is unavailable.
try:
    from VolumePath import Marker_Set, Link
    new_marker_set=Marker_Set
except:
    from VolumePath import volume_path_dialog
    d= volume_path_dialog(True)
    new_marker_set= d.new_marker_set
# name -> marker-set / surface-model registries filled in below.
marker_sets={}
surf_sets={}
# Marker definitions: (name, xyz position, rgb color).  Every marker uses
# the same radius, so the 21 copy-pasted stanzas of the generated script
# are collapsed into one data table and a single loop with identical
# behavior (create the set on first use, then place the marker).
_MARKER_RADIUS = 21.9005
_MARKERS = [
    ("Cog1_Anch", (271, 941, 717), (0, 0, 1)),
    ("Cog2_GFPN", (8, 644, 813), (1, 0.5, 0)),
    ("Cog2_GFPC", (316, 838, 442), (1, 0.5, 0)),
    ("Cog2_Anch", (741, 292, 304), (1, 0.5, 0)),
    ("Cog3_GFPN", (507, 483, 586), (1, 0.87, 0)),
    ("Cog3_GFPC", (646, 492, 368), (1, 0.87, 0)),
    ("Cog3_Anch", (899, 918, 572), (1, 0.87, 0)),
    ("Cog4_GFPN", (184, 328, 74), (0.97, 0.51, 0.75)),
    ("Cog4_GFPC", (983, 171, 422), (0.97, 0.51, 0.75)),
    ("Cog4_Anch", (29, 726, 312), (0.97, 0.51, 0.75)),
    ("Cog5_GFPN", (561, 783, 445), (0.39, 0.31, 0.14)),
    ("Cog5_GFPC", (9, 467, 476), (0.39, 0.31, 0.14)),
    ("Cog5_Anch", (71, 450, 182), (0.39, 0.31, 0.14)),
    ("Cog6_GFPN", (334, 50, 46), (0.6, 0.31, 0.64)),
    ("Cog6_GFPC", (854, 299, 186), (0.6, 0.31, 0.64)),
    ("Cog6_Anch", (534, 723, 309), (0.6, 0.31, 0.64)),
    ("Cog7_GFPN", (719, 189, 222), (0.89, 0.1, 0.1)),
    ("Cog7_GFPC", (201, 474, 690), (0.89, 0.1, 0.1)),
    ("Cog7_Anch", (147, 331, 734), (0.89, 0.1, 0.1)),
    ("Cog8_GFPC", (999, 507, 976), (0.3, 0.69, 0.29)),
    ("Cog8_Anch", (212, 142, 329), (0.3, 0.69, 0.29)),
]
for _name, _pos, _color in _MARKERS:
    # Create the named marker set on first use, then reuse it.
    if _name not in marker_sets:
        s = new_marker_set(_name)
        marker_sets[_name] = s
    s = marker_sets[_name]
    mark = s.place_marker(_pos, _color, _MARKER_RADIUS)
# Register any surface models with Chimera's model list.
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
ef4566801b729677ae25b0866bd8d8593802a4ee | d37a19ab3bcaba6e808a18df411c653c644d27db | /Year1/ca116/lab10/prefix-2.py | 0f780e706f8656cab0331852654b89ce2999a848 | [] | no_license | Andrew-Finn/DCU | 9e7009dac9a543aaade17e9e94116259dcc1de20 | 013789e8150d80d3b3ce2c0c7ba968b2c69a7ce0 | refs/heads/master | 2023-02-21T05:13:42.731828 | 2022-02-14T12:39:20 | 2022-02-14T12:39:20 | 157,438,470 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | #!/usr/bin/env python
def find_with_prefix(words, prefix):
    """Return the first word in *words* that starts with *prefix*, or None."""
    for word in words:
        if word.startswith(prefix):
            return word
    return None


if __name__ == "__main__":
    # The original loop was a broken Python 2 -> 3 port: the bare `print`
    # no longer printed `a[i]` (they became two separate statements), the
    # typo'd `lenght` slice was just str.startswith, and the trailing
    # `if i < len(s): i = 1` reset was dead logic.  Search the list and
    # print the first match instead.
    a = []
    s = "mont"
    match = find_with_prefix(a, s)
    if match is not None:
        print(match)
| [
"git@afinn.me"
] | git@afinn.me |
49b43b95a19db5b65407a7b9cba11476f6bd9c45 | 9f2a0006322235db485912543565e090bccd0de7 | /pathConverter/pathConverter/wsgi.py | 6b29636d01ee8bc917ebf61cdc08d25f244b4307 | [] | no_license | xiaoxiaolulu/djangoConsolidate | 12aa1e0e50497eb3f58b47b9876074423c18e525 | 364bf9537112f4d39f7fb159a2eb6734e9540ec5 | refs/heads/master | 2021-01-02T03:49:40.176569 | 2020-02-17T17:21:05 | 2020-02-17T17:21:05 | 239,475,972 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for pathConverter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already
# overrides it, then expose the WSGI callable for servers to import.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pathConverter.settings')
application = get_wsgi_application()
| [
"546464268@qq.com"
] | 546464268@qq.com |
36b6d3dba6ad5687fc85821c8dd5ce78b2bddf17 | e81d274d6a1bcabbe7771612edd43b42c0d48197 | /Python高级/day39(UDP、TCP回顾)/demo/02_Tcp Udp通信和实践/tcp服务器.py | d7ced7ae4a8bb0ee7037b8f670fc0e95579beef8 | [
"MIT"
] | permissive | ChWeiking/PythonTutorial | 1259dc04c843382f2323d69f6678b9431d0b56fd | 1aa4b81cf26fba2fa2570dd8e1228fef4fd6ee61 | refs/heads/master | 2020-05-15T00:50:10.583105 | 2016-07-30T16:03:45 | 2016-07-30T16:03:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from socket import *
# Minimal TCP server demo: accept one connection, print it, clean up.
tcpserver = socket(AF_INET,SOCK_STREAM)
# Bind to all interfaces on port 5551 and allow up to 5 pending clients.
tcpserver.bind(("",5551))
tcpserver.listen(5)
# accept() blocks until a client connects; returns (conn socket, peer addr).
dat,ip = tcpserver.accept()
print(dat,ip)
# Bug fix: close the accepted connection socket too -- the original only
# closed the listening socket and leaked the per-client one.
dat.close()
tcpserver.close()
#<socket.socket fd=4, family=AddressFamily.AF_INET,
# type=SocketKind.SOCK_STREAM, proto=0, laddr=('192.168.14.85', 5551),
# raddr=('192.168.14.8', 52273)> ('192.168.14.8', 52273)
| [
"1025212779@qq.com"
] | 1025212779@qq.com |
054cb55739a73e35353e05b328c4cd6b763602ea | 7eea707a1d422b65353238c03a5a5d87c167cf64 | /urllibstart.py | c32bc78447f9fede675e763af539952e98792917 | [] | no_license | liberbell/py04 | 81eac41330ea7b4271661dc46d9888f74f17877c | 3118d5f19b1a5a356b215ec071642c3b97c61c88 | refs/heads/master | 2020-06-24T21:56:17.907409 | 2019-08-05T22:39:15 | 2019-08-05T22:39:15 | 199,102,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | import urllib.request
def main():
    """Fetch https://httpbin.org/xml and print status, headers and body."""
    url = "https://httpbin.org/xml"
    # Use a context manager so the HTTP connection is always closed,
    # even if reading or decoding the body raises (the original never
    # closed the response object).
    with urllib.request.urlopen(url) as result:
        print("Result code: {0}".format(result.status))
        print("Headers:----------")
        print(result.getheaders())
        print("Returned data:----")
        print(result.read().decode('UTF-8'))
if __name__ == '__main__':
main()
| [
"liberbell@gmail.com"
] | liberbell@gmail.com |
63ddf3acbbe69b137f1917be9d57e96c5d6984be | 6a82d489d993269be1560af0317b3d9098b603f9 | /exe43.py | 77aefadeebb5029ddc8dd53b75f10894dd4d0b0d | [] | no_license | andreplacet/reinforcement-python-3 | a06df30b2bf4314da3d7cb200f0c1937ade65a2a | 3e2dd8da00c4a32f29d237004aa52c7710fe2169 | refs/heads/master | 2023-01-01T18:17:49.604566 | 2020-10-30T17:33:16 | 2020-10-30T17:33:16 | 308,700,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | # Exercicio 43
# Exercicio 43 -- fast-food cash register: read orders until code 0,
# then print one line per order and the grand total.
codigos = [100, 101, 102, 103, 104, 105]
comidas = ['Cachorro Quente', 'Bauru Simples', 'Bauru com ovo', 'Hamburguer', 'ChesseBurguer', 'Refrigerante']
precos = [1.20, 1.30, 1.50, 1.20, 1.30, 1.0]
pedido = []  # total value of each individual order line
while True:
    print(f'Pedido n°{len(pedido) + 1}')
    codigo = int(input("Digite o código do alimento: "))
    if codigo == 0:
        # Code 0 ends order entry.
        break
    while codigo not in codigos:
        print('[Este código não corresponde a nenhum alimento.]')
        codigo = int(input('Digite o código do alimento: '))
    indice = codigos.index(codigo)
    quantidade = int(input('Digite a quantidade: '))
    pedido.append(precos[indice] * quantidade)
# BUG FIX: the original printed `range(n_pedido - 1)` lines, silently
# dropping the last order from the bill; enumerate every order instead.
for i, valor in enumerate(pedido):
    print(f'Pedido n°{i + 1} = R$ {valor:.2f}')
print(f'Total: R${sum(pedido):.2f}')
| [
"andreplacet@gmail.com"
] | andreplacet@gmail.com |
4b4f4c75b734be2e4e1d26389d83033b29ff6467 | add72f4d6f9f7af1f437d19213c14efb218b2194 | /icekit/plugins/links/migrations/0004_auto_20170314_1401.py | db3cd21e288b25c4f34168d17cda7de9e95d20b4 | [
"MIT"
] | permissive | ic-labs/django-icekit | 6abe859f97c709fcf51207b54778501b50436ff7 | c507ea5b1864303732c53ad7c5800571fca5fa94 | refs/heads/develop | 2022-08-08T21:26:04.144852 | 2018-01-08T02:55:17 | 2018-01-08T02:55:17 | 65,470,395 | 53 | 12 | MIT | 2022-07-06T19:59:39 | 2016-08-11T13:11:02 | Python | UTF-8 | Python | false | false | 924 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declares the `style` field on the
    three link models with the single choice ('', 'Normal')."""
    dependencies = [
        ('ik_links', '0003_auto_20161117_1810'),
    ]
    operations = [
        # Each AlterField redefines `style` as an optional CharField
        # whose only selectable style is the blank "Normal" option.
        migrations.AlterField(
            model_name='articlelink',
            name='style',
            field=models.CharField(choices=[(b'', b'Normal')], max_length=255, verbose_name=b'Link style', blank=True),
        ),
        migrations.AlterField(
            model_name='authorlink',
            name='style',
            field=models.CharField(choices=[(b'', b'Normal')], max_length=255, verbose_name=b'Link style', blank=True),
        ),
        migrations.AlterField(
            model_name='pagelink',
            name='style',
            field=models.CharField(choices=[(b'', b'Normal')], max_length=255, verbose_name=b'Link style', blank=True),
        ),
    ]
| [
"greg@interaction.net.au"
] | greg@interaction.net.au |
7ee890327d38e18ac084687320b2116e85b2cc0b | f281c9ecd48aedd30469cfbd556bc3319cd8419d | /web_framework/src/router3.py | f4071387e051161aebb4b39a1463f5cc96e91535 | [] | no_license | youerning/blog | 5d5edeb4f836d233a4119796f38fc4e33531714e | 59c3704cf5a77bba70a48a5d09db9b165ea59d4b | refs/heads/master | 2023-08-31T04:08:16.461923 | 2023-08-27T01:28:39 | 2023-08-27T01:28:39 | 114,074,235 | 183 | 105 | null | 2023-05-05T02:36:52 | 2017-12-13T04:35:00 | HTML | UTF-8 | Python | false | false | 2,895 | py | # -*- coding: UTF-8 -*-
# @author youerning
# @email 673125641@qq.com
# 主要参考于: https://github.com/sirMackk/diy_framework/blob/master/diy_framework/application.py
import re
from collections import namedtuple
from functools import partial
from functools import wraps
SUPPORTED_METHODS = {"GET", "POST"}
Route = namedtuple("Route", ["methods", "pattern", "handler"])
class View:
    # Placeholder base class for class-based handlers; appears unused in
    # this module (all demo handlers below are plain functions).
    pass
class Router(object):
    """Minimal URL router: maps (HTTP method, path) pairs to handlers,
    with `{name}`-style dynamic path segments bound as keyword args."""
    def __init__(self):
        # Registered Route(methods, compiled pattern, handler) tuples.
        self._routes = []
    @classmethod
    def build_route_regex(self, regexp_str):
        # Route paths come in two formats:
        # 1. /home -- no dynamic variable; produces a regex like ^/home$
        # 2. /item/{name} -- with a dynamic variable; rewritten to
        #    ^/item/(?P<name>[a-zA-Z0-9_-]+)$ so the segment is captured
        #    as a named group.
        def named_groups(matchobj):
            return '(?P<{0}>[a-zA-Z0-9_-]+)'.format(matchobj.group(1))
        re_str = re.sub(r'{([a-zA-Z0-9_-]+)}', named_groups, regexp_str)
        re_str = ''.join(('^', re_str, '$',))
        return re.compile(re_str)
    @classmethod
    def match_path(self, pattern, path):
        # Returns the dict of captured path variables ({} when the route
        # has none), or None when the path does not match at all.
        match = pattern.match(path)
        try:
            return match.groupdict()
        except AttributeError:
            return None
    def add_route(self, path, handler, methods=None):
        # Register *handler* for *path*; defaults to GET-only.
        if methods is None:
            methods = {"GET"}
        else:
            methods = set(methods)
        pattern = self.__class__.build_route_regex(path)
        route = Route(methods, pattern, handler)
        if route in self._routes:
            raise Exception("路由重复了: {}".format(path))
        self._routes.append(route)
    def get_handler(self, method, path):
        # First matching route wins; path variables are pre-bound onto
        # the handler via functools.partial.  Falls back to not_found.
        for route in self._routes:
            if method in route.methods:
                params = self.match_path(route.pattern, path)
                if params is not None:
                    return partial(route.handler, **params)
        return not_found
    def route(self, path, methods=None):
        # Decorator form of add_route.
        def wrapper(handler):
            # Assigning to this name inside the closure would make Python
            # treat it as a local and stop searching the enclosing scope,
            # hence the nonlocal declaration.
            nonlocal methods
            if callable(handler):
                if methods is None:
                    methods = {"GET"}
                else:
                    methods = set(methods)
                self.add_route(path, handler, methods)
            return handler
        return wrapper
# Demo wiring: register two handlers and exercise the router.
route = Router()
@route.route("/home")
def home():
    return "home"
@route.route("/item/{name}", methods=["GET", "POST"])
def item(name):
    return name
def not_found():
    # Fallback handler returned by Router.get_handler on no match.
    return "not found"
# Expected output: home / not found / item1 / item1 / not found
# (/home is registered for GET only, so POST /home misses).
print(route.get_handler("GET", "/home")())
print(route.get_handler("POST", "/home")())
print(route.get_handler("GET", "/item/item1")())
print(route.get_handler("POST", "/item/item1")())
print(route.get_handler("GET", "/xxxxxx")())
"673125641@qq.com"
] | 673125641@qq.com |
8b354179b8a8992d767d00a4edc6a9d8f61d5c3b | e1b8ae703c84f6a06dd3a3072cfa9afb7f9ebce7 | /accounts/views.py | 0173f6be5057efb35f374b1cdba19b08f80fd4c6 | [] | no_license | Code-Institute-Submissions/renanclothestore | 95a2a161f0f0046e328cb639a88ddaf6afaceae5 | ea295d1643b06a1f5cdbdbafcdbe767d2c286648 | refs/heads/master | 2020-03-26T12:34:21.946183 | 2018-08-13T21:40:09 | 2018-08-13T21:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,209 | py | from django.contrib import messages, auth
from django.contrib.auth.decorators import login_required
from accounts.forms import UserRegistrationForm, UserLoginForm
from django.core.urlresolvers import reverse
from django.shortcuts import render, redirect
from django.template.context_processors import csrf
from django.conf import settings
import datetime
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from models import User
import stripe
import arrow
import json
stripe.api_key = settings.STRIPE_SECRET
def register(request):
    """Register a new user with a paid Stripe subscription.

    POST: validate the form, create a Stripe customer on the
    REG_MONTHLY plan, persist the user with the customer id and a
    4-week subscription window, then log them straight in.
    GET: render an empty form pre-filled with today's card expiry.

    NOTE: this file uses Python 2 syntax (``except ... , e``).
    """
    if request.method == 'POST':
        form = UserRegistrationForm(request.POST)
        if form.is_valid():
            try:
                # Charge/subscribe first; only create the user if Stripe
                # accepted the card token.
                customer = stripe.Customer.create(
                    email=form.cleaned_data['email'],
                    card=form.cleaned_data['stripe_id'],
                    plan='REG_MONTHLY',
                )
                if customer:
                    user = form.save()
                    user.stripe_id = customer.id
                    # Subscription paid up for the next 4 weeks; the
                    # webhook below extends it on each paid invoice.
                    user.subscription_end = arrow.now().replace(weeks=+4).datetime
                    user.save()
                    user = auth.authenticate(email=request.POST.get('email'), password=request.POST.get('password1'))
                    if user:
                        auth.login(request, user)
                        messages.success(request, "You have successfully registered")
                        return redirect(reverse('profile'))
                    else:
                        messages.error(request, "We were unable to log you in at this time")
                else:
                    messages.error(request, "We were unable to take payment from the card provided")
            except stripe.error.CardError, e:
                messages.error(request, "Your card was declined!")
    else:
        today = datetime.date.today()
        form = UserRegistrationForm(initial={'expiry_month': today.month, 'expiry_year': today.year})
    # Fall through: re-render the form (with errors on invalid POST).
    args = {'form': form, 'publishable': settings.STRIPE_PUBLISHABLE}
    args.update(csrf(request))
    return render(request, 'register.html', args)
@login_required(login_url='/accounts/login/')
def cancel_subscription(request):
    """Cancel the logged-in user's Stripe subscription at period end,
    then return to the profile page."""
    try:
        customer = stripe.Customer.retrieve(request.user.stripe_id)
        # at_period_end=True keeps access until the paid period expires.
        customer.cancel_subscription(at_period_end=True)
    except Exception, e:
        # Broad catch: surface any Stripe/API failure as a flash message.
        messages.error(request, e)
    return redirect('profile')
@csrf_exempt
def subscriptions_webhook(request):
    """Stripe webhook: on a paid invoice event, extend the matching
    user's subscription by four weeks.

    SECURITY NOTE (review): verification of the event against Stripe is
    commented out below, so the endpoint currently trusts the POSTed
    JSON as-is -- re-enable the Event.retrieve check before going live.
    """
    event_json = json.loads(request.body)
    # Verify the event by fetching it from Stripe
    try:
        # firstly verify this is a real event generated by Stripe.com
        # commented out for testing - uncomment when live
        # event = stripe.Event.retrieve(event_json['object']['id'])
        cust = event_json['object']['customer']
        paid = event_json['object']['paid']
        user = User.objects.get(stripe_id=cust)
        if user and paid:
            user.subscription_end = arrow.now().replace(weeks=+4).datetime # add 4 weeks from now
            user.save()
    except stripe.InvalidRequestError, e:
        return HttpResponse(status=404)
    return HttpResponse(status=200)
@login_required(login_url='/login/')
def profile(request):
    """Render the logged-in user's profile page."""
    return render(request, 'profile.html')
def login(request):
    """Log a user in.

    GET renders an empty login form; POST authenticates the submitted
    credentials.  On success the user is redirected to their profile;
    on failure the form is re-rendered with a non-field error.
    """
    if request.method == 'POST':
        form = UserLoginForm(request.POST)
        if form.is_valid():
            user = auth.authenticate(email=request.POST.get('email'),
                                     password=request.POST.get('password'))
            if user is not None:
                auth.login(request, user)
                # Bug fix: this is a success notice, so flash it at the
                # success level -- the original used messages.error,
                # inconsistent with register()/logout().
                messages.success(request, "You have successfully logged in")
                return redirect(reverse('profile'))
            else:
                form.add_error(None, "Your email or password was not recognised")
    else:
        form = UserLoginForm()
    args = {'form': form}
    args.update(csrf(request))
    return render(request, 'login.html', args)
def logout(request):
    """Log the current user out and show the home page.

    NOTE(review): this renders index.html directly instead of
    redirecting, so the browser stays on the /logout URL -- a redirect
    would be the more conventional choice.
    """
    auth.logout(request)
    messages.success(request, 'You have successfully logged out')
    return render(request, 'index.html')
"renanzabeu@yahoo.it"
] | renanzabeu@yahoo.it |
8f404dd9f87feae9a0b2c22d54b7c1e5641b0c48 | 281fa4de7baa79587c7d1dedb63019627e429de0 | /lesson1/Hello_Input3.py | 531102bc4e305b9c90d292fbc657c41b9fc521ce | [] | no_license | vincenttuan/PythonCourse | a7b302324148633e84b9e6db3cc3b00eea8f08d4 | 648f342eb3c82b4bbd4e6575ef8e6e690322ce70 | refs/heads/master | 2020-04-18T17:32:55.734217 | 2019-03-30T08:31:13 | 2019-03-30T08:31:13 | 167,657,410 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | # -*- coding:UTF-8 -*-
import math
h = 170.0
w = 60.0
bmi = w / ((h/100)**2)
print('身體評量指數 bmi = %.2f' % bmi)
bmi = w / math.pow(h/100, 2)
print('bmi = %.2f' % bmi)
print('h = %.2f, w = %.2f, bmi = %.2f' % (h, w, bmi))
print("h = {0}, w = {1}, bmi = {2}".format(h, w, bmi))
if (bmi >= 18.0 and bmi < 23) :
print('正常')
else :
print('不正常')
if 18 <= bmi < 23:
print('正常')
else:
print('不正常')
| [
"vincentjava@yahoo.com.tw"
] | vincentjava@yahoo.com.tw |
77ee955b11d10f5174a6ce0263c5c809f8f785ef | 7e395a7ac6abec3fe24f4ca02d5370f1c8fb3c17 | /DemoPrj_tent/employee_shared/views.py | 46ff1396326f2b0f49311dd202b3bb110073e9c8 | [] | no_license | udaykumaraodh/DjangoTentPrj | fbfe6929954846d3c9bc4815a06108eecf3ea54c | 53d8c518247666f7325bb55672819dce66bf89a9 | refs/heads/main | 2023-07-27T21:06:11.704280 | 2021-08-31T16:28:49 | 2021-08-31T16:28:49 | 401,743,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.db import connection
def empDetails(request):
    """Insert a fixed demo row (103, 'harish', 30000.0) and return the
    whole employee table as a raw HttpResponse.

    The SQL contains only literal values -- no user input is
    interpolated, so there is no injection surface here.
    """
    with connection.cursor() as cursor:
        cursor.execute('''Insert into employee_shared_empdetails(empno,ename,salary) values(103,'harish',30000.0) ''')
        # NOTE(review): under Django's default autocommit this explicit
        # commit is likely redundant -- confirm the connection settings.
        connection.commit()
        cursor.execute('Select * from employee_shared_empdetails')
        ds=cursor.fetchall()
        return HttpResponse(ds)
def empUpd(request):
    """Raise empno 103's salary by 10000 and return the whole employee
    table as a raw HttpResponse."""
    with connection.cursor() as cursor:
        cursor.execute('''update employee_shared_empdetails set salary=salary+10000 where empno=103 ''')
        connection.commit()
        cursor.execute('Select * from employee_shared_empdetails')
        ds=cursor.fetchall()
        return HttpResponse(ds)
def empDel(request):
    """Delete the row with primary key id=3 and return the remaining
    employee table as a raw HttpResponse."""
    with connection.cursor() as cursor:
        cursor.execute('''delete from employee_shared_empdetails where id=3 ''')
        connection.commit()
        cursor.execute('Select * from employee_shared_empdetails')
        ds=cursor.fetchall()
        return HttpResponse(ds)
# Create your views here.
| [
"udaykumarandolu@gmail.com"
] | udaykumarandolu@gmail.com |
743939c27c7e0e8d00a2487a97e1bdf562484341 | d3519a4d17c3a1097b6b16404d4657f3ab1035f7 | /env/gym_Rubiks_Cube/envs/rubiks_cube_env.py | 493f1074976e99b902f269699cccea4c176a2b66 | [] | no_license | AveyBD/rubiks-cube-ai | 1e7dc0d343e811d5fbe7dda989d61856266b9899 | a0f276ca022a579c6d1d75f817993b1dae44ff89 | refs/heads/master | 2020-09-28T07:08:57.938186 | 2019-12-08T19:31:19 | 2019-12-08T19:31:19 | 226,719,959 | 1 | 0 | null | 2019-12-08T19:30:10 | 2019-12-08T19:30:09 | null | UTF-8 | Python | false | false | 2,676 | py | import gym
from gym import spaces
import numpy as np
import random
from gym_Rubiks_Cube.envs import cube
actionList = [
'f', 'r', 'l', 'u', 'd', 'b',
'.f', '.r', '.l', '.u', '.d', '.b']
tileDict = {
'R': 0,
'O': 1,
'Y': 2,
'G': 3,
'B': 4,
'W': 5,
}
class RubiksCubeEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, orderNum=3):
# the action is 6 move x 2 direction = 12
self.action_space = spaces.Discrete(12)
# input is 9x6 = 54 array
self.orderNum = orderNum
low = np.array([0 for i in range(self.orderNum * self.orderNum * 6)])
high = np.array([5 for i in range(self.orderNum * self.orderNum * 6)])
self.observation_space = spaces.Box(low, high, dtype=np.uint8) # flattened
self.step_count = 0
self.scramble_low = 1
self.scramble_high = 10
self.doScamble = True
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
self.action_log.append(action)
self.ncube.minimalInterpreter(actionList[action])
self.state = self.getstate()
self.step_count = self.step_count + 1
reward = 0.0
done = False
others = {}
if self.ncube.isSolved():
reward = 1.0
done = True
if self.step_count > 40:
done = True
return self.state, reward, done, others
def reset(self):
self.state = {}
self.ncube = cube.Cube(order=self.orderNum)
if self.doScamble:
self.scramble()
self.state = self.getstate()
self.step_count = 0
self.action_log = []
return self.state
def getstate(self):
return np.array([tileDict[i] for i in self.ncube.constructVectorState()])
def render(self, mode='human', close=False):
if close:
return
self.ncube.displayCube(isColor=True)
def setScramble(self, low, high, doScamble=True):
self.scramble_low = low
self.scramble_high = high
self.doScamble = doScamble
def scramble(self):
# set the scramber number
scramble_num = random.randint(self.scramble_low, self.scramble_high)
# check if scramble
while self.ncube.isSolved():
self.scramble_log = []
for i in range(scramble_num):
action = random.randint(0, 11)
self.scramble_log.append(action)
self.ncube.minimalInterpreter(actionList[action])
def getlog(self):
return self.scramble_log, self.action_log
| [
"vivnps.verma@gmail.com"
] | vivnps.verma@gmail.com |
a46da7b7026d254511b4f87b98c7230d86a6ee3b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_299/ch153_2020_04_13_20_31_46_202522.py | 3cb9afb3697ff5ec456e21b36cec4d500a24ee1f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | def agrupa_por_idade(dicio):
criança = []
adolescente = []
adulto = []
idoso = []
dicionovo = {'crianças':criança, 'adolescentes':adolescente, 'adultos':adulto, 'idosos':idoso}
for nome , idade in dicio.items():
if idade <= 11:
criança.append(nome)
elif idade <= 17:
adolescente.append(nome)
elif idade <= 59:
adulto.append(nome)
else:
idoso.append(nome)
dicionovo['crianças'] = criança
dicionovo['adolescentes'] = adolescente
dicionovo['adultos'] = adulto
dicionovo['idosos'] = idoso
return dicionovo | [
"you@example.com"
] | you@example.com |
94ccbc6af940e19a5187ad8c2c0913db8af83b8d | 04a77043cebd9415069aad4a6b8e7af077de1168 | /2-python_opp/day04/Python_OO4/with.py | f38bdaf9633ae0eaac7bd82d4b73c4c65fce139f | [] | no_license | yangxiangtao/biji | a935fbc4af42c81205900cb95a11e98c16d739de | 5c5f46e6c145fc02ea10b7befdc05c489fc3b945 | refs/heads/master | 2022-11-12T02:25:51.532838 | 2019-04-02T01:22:12 | 2019-04-02T01:22:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | # with.py
# with语句示例
# try:
# #f = open("aaa.txt", "rt")
# # 使用with语句,不管以下的操作是否
# # 发生异常,都能保证文件被正确关闭
# with open("a.txt", "rt") as f:
# for line in f:
# print(line,end="")
# # with语句结束
# except:
# print("文件操作失败")
class A: #自定义资源管理器
def __init__(self, name):
self.name = name
def __enter__(self):
print("__enter__()方法被执行")
return self
def __exit__(self, exc_type, exc_val,exc_tb):
print("__exit__()方法被执行")
if exc_type is None: #没有出现异常
print("没有出现异常")
else: # 出现异常
print("错误类型:", exc_type)
print("错误对象:", exc_val)
print("TraceBack:", exc_tb)
if __name__ == "__main__":
with A("test_name") as a:
print("with语句执行了")
# 制造或不制造异常
a = int(input("请输入一个数字:"))
print("程序退出") | [
"123@bb.com"
] | 123@bb.com |
d41d78e8572fe2ff96675b55eb8b75fc96cc4b9a | a5e6ce10ff98539a94a5f29abbc053de9b957cc6 | /competition/20191124/d.py | fb87af6d47c3ddbc8bf896989ae9b64796fbfd70 | [] | no_license | shimaw28/atcoder_practice | 5097a8ec636a9c2e9d6c417dda5c6a515f1abd9c | 808cdc0f2c1519036908118c418c8a6da7ae513e | refs/heads/master | 2020-07-26T10:59:51.927217 | 2020-06-13T11:53:19 | 2020-06-13T11:53:19 | 208,622,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | N = int(input())
g = []
for _ in range(N-1):
a, b = map(int, input().split())
g.append((a,b))
d = {}
for i in range(1, N+1):
d[i] = []
d_lines = {}
col = [1]
n_cols = 1
d[g[0][0]].append(1)
d[g[0][1]].append(1)
d_lines[1] = [g[0][0], g[0][1]]
for gi in g[1:]:
a, b = gi[0], gi[1]
n = 1
while True:
if n not in d[a]:
break
else:
n += 1
col.append(n)
n_cols = max(n_cols, n)
d[a].append(n)
d[b].append(n)
print(n_cols)
for c in col:
print(c)
| [
"shima.w28@gmail.com"
] | shima.w28@gmail.com |
b529a2465a28b369f9bc16606c3b1b5d712fb008 | 7855dfd58df154a6f0a0d0939630fbc3ca24b0c1 | /memegen/__init__.py | 159ae96c2d670c3110c7aaedebcf52706c2a278c | [
"MIT"
] | permissive | neufeldtech/memegen | f17c7b4c9bcb849fc0e021a444eb5678ab7af06e | 1375c7ca88b44fc435bb55992b5ef6b7ad882475 | refs/heads/master | 2020-12-25T15:51:08.260488 | 2016-06-06T01:38:11 | 2016-06-06T11:29:14 | 52,167,031 | 1 | 0 | null | 2016-02-20T17:56:40 | 2016-02-20T17:56:39 | null | UTF-8 | Python | false | false | 296 | py | """Package for MemeGen."""
import sys
__project__ = 'MemeGen'
__version__ = '2.1'
VERSION = "{} v{}".format(__project__, __version__)
PYTHON_VERSION = 3, 5
if sys.version_info < PYTHON_VERSION: # pragma: no cover (manual test)
exit("Python {}.{}+ is required.".format(*PYTHON_VERSION))
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com |
101d748a0568052170a6d7e693048fe769ade8ae | 7cce9875553a31b2ef2253792d46c488a8e5acb7 | /keras/keras12_split2_pratice.py | eba9f6c7ecb0b719832496daab847b8985cdac43 | [] | no_license | qzson/Study | 8498f0b1612fb2f7947d5067b7275c338e0216c6 | 4b214e9efb0ad7a5e11bca58fd311ee66200ad5e | refs/heads/master | 2022-12-01T15:16:00.966115 | 2020-08-14T08:16:42 | 2020-08-14T08:16:42 | 263,221,146 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | # split 개인 연습 파일
# 1. train:val:test = 6:2:2 데이터 분리
import numpy as np
x = np.array(range(1,101))
y = np.array(range(101,201))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, shuffle = False,
train_size = 0.6)
x_val, x_test, y_val, y_test = train_test_split(
x_test, y_test, shuffle = False,
test_size = 0.5) # 40% 중 절반 = 20%
print(x_train)
print(x_val)
print(x_test)
# 2. 8:1:1
import numpy as np
x = np.array(range(1,101))
y = np.array(range(101,201))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, shuffle = False,
train_size = 0.8)
x_val, x_test, y_val, y_test = train_test_split(
x_test, y_test, shuffle = False,
test_size = 0.5)
print(x_train)
print(x_val)
print(x_test)
# 3. 7:1:2
import numpy as np
x = np.array(range(1,101))
y = np.array(range(101,201))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, shuffle = False,
train_size = 0.7)
x_val, x_test, y_val, y_test = train_test_split(
x_test, y_test, shuffle = False,
test_size = 1/3)
# 질문 : 그냥 1:2로 나누는 과정에서 나머지는 자동으로 분류되나요?
# 답변 : test_size 에서 test가 1/3으로 할당을 했으니, 나머지는 2/3으로 자동으로 연산
print(x_train)
print(x_val)
print(x_test)
# 4. 둘 중 하나만 써도 된다.
import numpy as np
x = np.array(range(1,101))
y = np.array(range(101,201))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, shuffle = False,
train_size = 0.6, test_size = 0.4)
# <구자님 부연설명 참조>
# train_size와 test_size를 둘 다 사용해도 되고, 둘 중 하나만 사용해도 됨
# 단, train_size + test_size = sum > 1 이면 에러 뜸
# sum < 1 이면 빠진 값 만큼 날아감
# ex) train_size = 0.6, test_size = 0.3 이면 sum = 0.9로 0.1만큼의 값이 사라진다.
# train_size = 0.6, test_size = 0.4 [가능]
# train_size = 0.6, test_size = 0.3 [나머지 10%는 어디루?]
# train_size = 0.6, test_size = 0.5 [Error 발생]
print(x_train)
print(x_test) | [
"qzson@naver.com"
] | qzson@naver.com |
d16de8d3bf1afedbf28f3c59adba5016a078d48b | bc368e94d950af97b71e0b0c2a3d2b86c6a9d173 | /learn-theano/learn/nn2.py | 1d24fcba78306ab8cb4597b321cc554f35b414ff | [] | no_license | ChenLiangbo/Learning-python | 6100c6ff74330fb1710622cdb22bde5a1d52e40b | 868cc4d71d1b9bd362b9fac8a39f295037d20b4c | refs/heads/master | 2020-06-11T07:42:04.434182 | 2018-05-08T05:06:14 | 2018-05-08T05:06:14 | 75,731,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,852 | py | #!usr/bin/env/python
# -*- coding: utf-8 -*-
import theano
import theano.tensor as T
from theano import function
from theano.ifelse import ifelse
import numpy as np
from random import random
print "strat build model ..."
# http://www.tuicool.com/articles/M7FRziR
# 定义变量:
x = T.matrix('x')
w1 = theano.shared(np.array([random(),random()]))
w2 = theano.shared(np.array([random(),random()]))
w3 = theano.shared(np.array([random(),random()]))
b1 = theano.shared(1.)
b2 = theano.shared(1.)
learning_rate = 0.01
a1 = 1/(1+T.exp(-T.dot(x,w1)-b1))
print "a2 = ",type(a1)
print dir(a1)
print "-"*80
print "ndim = ",a1.ndim
# print "get_scalar_constant_value = ",a1.get_scalar_constant_value()
print "dtype = ",a1.dtype
a2 = 1/(1+T.exp(-T.dot(x,w2)-b1))
x2 = T.stack([a1,a2],axis=1)
a3 = 1/(1+T.exp(-T.dot(x2,w3)-b2))
a_hat = T.vector('a_hat') #Actual output
cost = -(a_hat*T.log(a3) + (1-a_hat)*T.log(1-a3)).sum()
dw1,dw2,dw3,db1,db2 = T.grad(cost,[w1,w2,w3,b1,b2])
train = function(
inputs = [x,a_hat],
outputs = [a3,cost],
updates = [
[w1, w1-learning_rate*dw1],
[w2, w2-learning_rate*dw2],
[w3, w3-learning_rate*dw3],
[b1, b1-learning_rate*db1],
[b2, b2-learning_rate*db2]
]
)
inputs = [
[0, 0],
[0, 1],
[1, 0],
[1, 1]
]
outputs = [1,0,0,1]
print "start training ..."
# 遍历输入并计算输出:
cost = []
for iteration in range(30000):
print "iteration = ",iteration
pred, cost_iter = train(inputs, outputs)
cost.append(cost_iter)
break
# 打印输出
print 'The outputs of the NN are:'
for i in range(len(inputs)):
print 'The output for x1=%d | x2=%d is %.2f' % (inputs[i][0],inputs[i][1],pred[i])
# 绘制损失图:
print '\nThe flow of cost during model run is as following:'
import matplotlib.pyplot as plt
# plt.plot(cost)
# plt.show() | [
"chenlb@polarwin.cn"
] | chenlb@polarwin.cn |
13bb30d8a590842169beb86a035435accff49d55 | add0bb7a309ea346614d7f560a24e653d3d0ff67 | /pythonbase/数据解析/2.正则解析_分页爬取.py | fdab3e17bf3c37d2d29e351313833ffb684b8d18 | [] | no_license | 1572903465/PythonProjects | 935aff08d5b3d3f146393764a856369061513d36 | 73576080174f72ea1df9b36d201cf3949419041b | refs/heads/master | 2023-06-10T15:50:49.178112 | 2021-07-05T15:42:53 | 2021-07-05T15:42:53 | 301,328,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | # 需求爬取糗事百科中糗图模块下的所有图片图片
import requests
import re
import os
if __name__ == '__main__':
#创建一个文件加, 保存所有的图片
if not os.path.exists('./qiutuLibs'):
os.mkdir('./qiutuLibs')
# 设置一个通用的url模板
url = 'https://www.qiushibaike.com/imgrank/page/%d/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36'
}
# pageNum = 1
for pageNum in range(1,5):
#对应页码的url
new_url = format(url%pageNum)
# 使用通用怕从对url对应的一整张页面进行爬取
page_text = requests.get(url=new_url,headers=headers).text
# 使用聚焦爬虫将页面的所有糗图进行解析/爬取
ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'
img_src_list = re.findall(ex,page_text,re.S)
print(img_src_list)
for src in img_src_list:
#拼接出一个完整的图片url
src = 'https:'+src
# 请求到了图片的二进制数据
img_data = requests.get(url=src,headers=headers).content
#生成图片名称
img_name=src.split('/')[-1]
imgPath = './qiutuLibs/'+img_name
with open(imgPath,'wb') as fp:
fp.write(img_data)
print(img_name+'爬取成功')
| [
"1572903465@qq.com"
] | 1572903465@qq.com |
4232950bb6747d83d1fbf6623c4a0579313b9c14 | eef659a707d87e979741cc11ad59344c911790f5 | /cc3/rest/serializers.py | 3ab26544ea65b8b75307be53466128327e97be3c | [] | no_license | qoin-open-source/samen-doen-cc3 | 1e5e40a9b677886aa78f980670df130cbbb95629 | 8b7806177e1e245af33b5112c551438b8c0af5d2 | refs/heads/master | 2020-05-04T02:26:07.039872 | 2019-04-02T21:19:54 | 2019-04-02T21:19:54 | 178,926,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from rest_framework import serializers
from cc3.cards.models import Card
class CardSerializer(serializers.ModelSerializer):
class Meta:
model = Card
fields = (
'card_type',
'number',
'card_security_code',
'creation_date',
'activation_date',
'expiration_date',
'card_security_code_blocked_until',
'owner',
'status',
)
| [
"stephen.wolff@qoin.com"
] | stephen.wolff@qoin.com |
537c884c6295315906c8b48d238b4689ecbbad55 | e57d7785276053332c633b57f6925c90ad660580 | /sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/async_samples/sample_query_text_async.py | a34195f7e320e51f4760c054540dd55b0ae6b5c0 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 2,419 | py | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_query_text_async.py
DESCRIPTION:
This sample demonstrates how to ask a question from supplied text data.
USAGE:
python sample_query_text_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_QUESTIONANSWERING_ENDPOINT - the endpoint to your QuestionAnswering resource.
2) AZURE_QUESTIONANSWERING_KEY - your QuestionAnswering API key.
"""
import asyncio
async def sample_query_text():
# [START query_text_async]
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.questionanswering.aio import QuestionAnsweringClient
from azure.ai.language.questionanswering import models as qna
endpoint = os.environ["AZURE_QUESTIONANSWERING_ENDPOINT"]
key = os.environ["AZURE_QUESTIONANSWERING_KEY"]
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
async with client:
input = qna.TextQueryOptions(
question="How long it takes to charge surface?",
records=[
qna.TextRecord(
text="Power and charging. It takes two to four hours to charge the Surface Pro 4 battery fully from an empty state. " +
"It can take longer if you’re using your Surface for power-intensive activities like gaming or video streaming while you’re charging it.",
id="doc1"
),
qna.TextRecord(
text="You can use the USB port on your Surface Pro 4 power supply to charge other devices, like a phone, while your Surface charges. " +
"The USB port on the power supply is only for charging, not for data transfer. If you want to use a USB device, plug it into the USB port on your Surface.",
id="doc2"
)
]
)
output = await client.query_text(input)
best_answer = [a for a in output.answers if a.confidence_score > 0.9][0]
print("Q: {}".format(input.question))
print("A: {}".format(best_answer.answer))
# [END query_text_async]
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(sample_query_text())
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
9c004b8f8bc3f269da8eea1ebaae63fe378d510a | 31cc708f7e38017073cb148f33393aed879e27bb | /blog/migrations/0001_initial.py | 0c440763cc8ed74b8afb6998860ff8c7b0306564 | [] | no_license | rkdwldnjs1/My-first-blog | 9a38820a1f9211052ff491945d7cd366a07b6783 | f43b28a2a3eb00d70b326c103e3ae311299b4210 | refs/heads/master | 2020-03-22T01:35:19.576412 | 2018-07-01T09:14:50 | 2018-07-01T09:14:50 | 139,315,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-21 19:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"you@example.com"
] | you@example.com |
83f869d6971a120fc931416ae7e1f6cc3824e0ea | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/website_crm/models/__init__.py | 266fafb332a6569b8e611cb61511f4b2ae4a4946 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | # -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from . import crm_lead
from . import res_config_settings
from . import website_visitor
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
c99b8bdaa5e8f2344cdc98648368b6410b06dfad | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve/client/script/ui/shared/messagebox.py | d97d98c5ea0fb0485ea02234e8cd716ccb54e7bb | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,300 | py | #Embedded file name: eve/client/script/ui/shared\messagebox.py
import uiprimitives
import uicontrols
import uiutil
import uthread
import uicls
import carbonui.const as uiconst
import localization
class MessageBox(uicontrols.Window):
__guid__ = 'form.MessageBox'
__nonpersistvars__ = ['suppress']
default_width = 340
default_height = 210
default_alwaysLoadDefaults = True
def ApplyAttributes(self, attributes):
uicontrols.Window.ApplyAttributes(self, attributes)
self.suppress = 0
self.name = 'modal'
self.scope = 'all'
self.edit = None
self.sr.main.clipChildren = True
def Execute(self, text, title, buttons, icon, suppText, customicon = None, height = None, default = None, modal = True, okLabel = None, cancelLabel = None):
self._Execute(title, buttons, icon, suppText, customicon, height, default, modal, okLabel, cancelLabel)
if text:
text = text.replace('\r', '').replace('\n', '')
edit = uicls.EditPlainText(parent=self.sr.main, padding=const.defaultPadding, readonly=1)
self.edit = edit
uthread.new(self.SetText, text)
def ExecuteCustomContent(self, customContentCls, title, buttons, icon, suppText, customicon = None, height = None, default = None, modal = True, okLabel = None, cancelLabel = None, messageData = None):
self._Execute(title, buttons, icon, suppText, customicon, height, default, modal, okLabel, cancelLabel)
customContent = customContentCls(parent=self.sr.main, padding=const.defaultPadding, messageData=messageData, align=uiconst.TOTOP)
self.height = customContent.GetContentHeight() + 110
def _Execute(self, title, buttons, icon, suppText, customicon, height, default, modal, okLabel, cancelLabel):
if height is None:
height = 210
self.MakeUnMinimizable()
self.HideHeader()
self.SetMinSize([340, height])
self.DefineIcons(icon, customicon)
if title is None:
title = localization.GetByLabel('UI/Common/Information')
self.sr.main = uiutil.FindChild(self, 'main')
caption = uicontrols.EveCaptionLarge(text=title, align=uiconst.CENTERLEFT, parent=self.sr.topParent, left=64, width=270)
self.SetTopparentHeight(max(56, caption.textheight + 16))
self.DefineButtons(buttons, default=default, okLabel=okLabel, cancelLabel=cancelLabel)
if suppText:
self.ShowSupp(suppText)
if modal:
uicore.registry.SetFocus(self)
def ShowSupp(self, text):
bottom = uiprimitives.Container(name='suppressContainer', parent=self.sr.main, align=uiconst.TOBOTTOM, height=20, idx=0)
self.sr.suppCheckbox = uicontrols.Checkbox(text=text, parent=bottom, configName='suppress', retval=0, checked=0, groupname=None, callback=self.ChangeSupp, align=uiconst.TOPLEFT, pos=(6, 0, 320, 0))
bottom.height = max(20, self.sr.suppCheckbox.height)
def ChangeSupp(self, sender):
self.suppress = sender.checked
def SetText(self, txt):
self.edit.SetValue(txt, scrolltotop=1)
def CloseByUser(self, *etc):
if self.isModal:
self.SetModalResult(uiconst.ID_CLOSE)
else:
uicontrols.Window.CloseByUser(self)
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
ac325ec992292cfb4f5972a30cef9de1144e0730 | 47128c6ff1277eedf851670d33f7a288fdfe2246 | /redis database/redis_hashs.py | 972824bc49c27750abbba4a168846bf52f541d75 | [] | no_license | chati757/python-learning-space | 5de7f11a931cf95bc076473da543331b773c07fb | bc33749254d12a47523007fa9a32668b8dc12a24 | refs/heads/master | 2023-08-13T19:19:52.271788 | 2023-07-26T14:09:58 | 2023-07-26T14:09:58 | 83,208,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | import redis
redis=redis.StrictRedis(
host='localhost',
port=6379,
db=0,
password=8182757
)
'''
hash value
-hset <myhash> <field> <hashvalue>
-hget <myhash> <field>
-hmset <myhash> <field1> <hashvalue> <field2> <hashvalue2> ..
-hmget <myhash> <field1> <field2> ..
-hgetall <myhash>
'''
#hset
print("hset")
redis.hset("myhash","myfield","hashvalue")
#hget
print("hget")
test=redis.hget("myhash","myfield")
print(test)
#hmset
print("hmset")
redis.hmset("myhash2",{"test1":"test1","test2":"test2"})
#hmget
print("hmget")
test2=redis.hmget("myhash2",{"test1","test2"})
print(test2)
| [
"chati757@users.noreply.github.com"
] | chati757@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.