blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2a83480c6739ebf70ec159aa58c09e4f4c732008 | 9b9b6a7aa1de1092a8480771f2b08ffa0972218d | /python/foo/lambda/filter.py | 71efd0608d9c3078d544f9c1b2016a4d61a7fb8e | [
"WTFPL"
] | permissive | lijiansong/lang | c42ca757306b38f37a26fef841b2460f05a13af6 | 27ffecd9afe67ddac003fc4d6333e06e2cc20434 | refs/heads/master | 2023-02-25T17:36:01.221720 | 2023-02-14T14:10:29 | 2023-02-14T14:10:29 | 149,586,739 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | #!/usr/bin/env python
if __name__ == '__main__':
    fibonacci = [0,1,1,2,3,5,8,13,21,34,55]
    # x % 2 is 1 (truthy) for odd values, so filter keeps only the odd ones.
    odd_numbers = list(filter(lambda x: x % 2, fibonacci))
    print(odd_numbers)
    # Explicit comparison: keep values whose remainder is exactly 0 (even).
    even_numbers = list(filter(lambda x: x % 2 == 0, fibonacci))
    print(even_numbers)
    # Equivalent even filter: x % 2 - 1 is -1 (truthy) for even x and 0 (falsy) for odd x.
    even_numbers = list(filter(lambda x: x % 2 -1, fibonacci))
    print(even_numbers)
| [
"lijiansong@ict.ac.cn"
] | lijiansong@ict.ac.cn |
76ce7f008366b7ae6851bacf9174433b87f39e21 | b9aeae1212f153e73ad034cda3bc0057b97ce893 | /signbank/tools.py | f62254b4c41ac0b93e6fdb1198450d2154e54755 | [
"BSD-3-Clause"
] | permissive | ISOF-ITD/FinSL-signbank | e76e412d01db5ee0a59b7d962a4c0d62b8b528ca | 439fad48c67d3df031e479f2f71c1d6765d7e1f5 | refs/heads/master | 2022-11-29T13:46:10.733880 | 2018-06-15T09:45:33 | 2018-06-15T09:45:33 | 129,242,415 | 0 | 0 | BSD-3-Clause | 2018-04-12T11:44:26 | 2018-04-12T11:44:26 | null | UTF-8 | Python | false | false | 5,174 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.contrib.auth.decorators import permission_required
from django.contrib import messages
from django.shortcuts import render
from django.db.models import Prefetch, Q
from django.db import connection
from django.urls import reverse
from django.core.mail import mail_admins
from django.utils.translation import ugettext as _
from django.conf import settings
from signbank.dictionary.models import Gloss, Language, Translation, Keyword, Dataset, GlossRelation
from signbank.video.models import GlossVideo
@permission_required("dictionary.search_gloss")
def infopage(request):
    """Render the statistics/info page for glosses, videos and datasets.

    Builds site-wide counts, a per-dataset breakdown, and -- for staff
    users only -- a list of GlossVideo records whose files are missing on
    disk plus PostgreSQL database-size figures, with a warning banner and
    an admin e-mail when quota usage becomes critical.

    Requires the ``dictionary.search_gloss`` permission.
    """
    context = dict()
    # Site-wide totals.
    context["gloss_count"] = Gloss.objects.all().count()
    context["glossvideo_count"] = GlossVideo.objects.all().count()
    # Number of distinct glosses that have at least one video.
    context["glosses_with_video"] = GlossVideo.objects.filter(gloss__isnull=False).order_by("gloss_id")\
        .distinct("gloss_id").count()
    context["glossless_video_count"] = GlossVideo.objects.filter(gloss__isnull=True).count()
    # Videos with a non-empty poster image.
    context["glossvideo_poster_count"] = GlossVideo.objects.exclude(Q(posterfile="") | Q(posterfile__isnull=True))\
        .count()
    context["glossvideo_noposter_count"] = context["glossvideo_count"] - context["glossvideo_poster_count"]
    context["languages"] = Language.objects.all().prefetch_related("translation_set")
    context["keyword_count"] = Keyword.objects.all().count()
    # Per-dataset breakdown of the same statistics.
    datasets_context = list()
    datasets = Dataset.objects.all().prefetch_related("gloss_set", "translation_languages")
    for d in datasets:
        dset = dict()
        dset["dataset"] = d
        dset["gloss_count"] = Gloss.objects.filter(dataset=d).count()
        dset["glossvideo_count"] = GlossVideo.objects.filter(gloss__dataset=d).count()
        dset["glosses_with_video"] = GlossVideo.objects.filter(gloss__isnull=False, gloss__dataset=d)\
            .order_by("gloss_id").distinct("gloss_id").count()
        dset["glossless_video_count"] = GlossVideo.objects.filter(gloss__isnull=True, dataset=d).count()
        dset["glossvideo_poster_count"] = GlossVideo.objects.filter(dataset=d).exclude(
            Q(posterfile="") | Q(posterfile__isnull=True)).count()
        dset["glossvideo_noposter_count"] = dset["glossvideo_count"] - dset["glossvideo_poster_count"]
        dset["translations"] = list()
        # Per-language translation counts, restricted to this dataset's glosses.
        for language in d.translation_languages.all().prefetch_related(
                Prefetch("translation_set", queryset=Translation.objects.filter(gloss__dataset=d))):
            dset["translations"].append([language, language.translation_set.count()])
        datasets_context.append(dset)
    # For users that are 'staff'.
    if request.user.is_staff:
        # Find missing files: video/poster paths stored in the DB but absent on disk.
        problems = list()
        for vid in GlossVideo.objects.all():
            if vid.videofile and not os.path.isfile(vid.videofile.path):
                problems.append({"id": vid.id, "file": vid.videofile, "type": "video", "url": vid.get_absolute_url()})
            if vid.posterfile and not os.path.isfile(vid.posterfile.path):
                problems.append({"id": vid.id, "file": vid.posterfile, "type": "poster",
                                 "admin_url": reverse("admin:video_glossvideo_change", args=(vid.id,))})
        context["problems"] = problems
    # Only do this if the database is postgresql.
    if settings.DB_IS_PSQL:
        # Get postgresql database size and calculate usage percentage.
        with connection.cursor() as cursor:
            cursor.execute("SELECT pg_database_size(%s)", [settings.PSQL_DB_NAME])
            psql_db_size = cursor.fetchone()[0]
            cursor.execute("SELECT pg_size_pretty(pg_database_size(%s))", [settings.PSQL_DB_NAME])
            psql_db_size_pretty = cursor.fetchone()[0]
        context["psql_db_size"] = psql_db_size
        context["psql_db_size_pretty"] = psql_db_size_pretty
        # Calculate the usage percentage against the configured quota.
        usage_percentage = round((psql_db_size / settings.PSQL_DB_QUOTA)*100, 2)
        # Convert to str, so django localization doesn't change dot delimiter to comma in different languages.
        context["psql_db_usage"] = str(usage_percentage)
        # Warn on the page at 80% usage; e-mail the admins at 95%.
        if usage_percentage >= 80.0:
            messages.error(request, _("Database storage space usage is high, be prepared increase database quota. "
                                      "If the database gets over the quota maximum, data will be lost!"))
        if usage_percentage >= 95.0:
            mail_admins(subject="Database is reaching maximum quota!",
                        message="Dear admin, you are receiving this message because the database "
                                "is reaching its maximum quota. "
                                "Current size of the database is %s and the usage percentage is %s%%." %
                                (str(psql_db_size_pretty), str(usage_percentage)))
    return render(request, "../templates/infopage.html",
                  {'context': context,
                   'datasets': datasets_context,
                   })
| [
"henri.nieminen@gmail.com"
] | henri.nieminen@gmail.com |
b2cad08801464da5483622face487b998edb56d8 | 023d436179478be876d33d906e4b44923ff30c78 | /psf/make_explist.py | 4e750402afb1e657cbeb4ebe64910d3580082b91 | [] | no_license | rmjarvis/DESWL | 424fb339ddc01c55c9a3d996a7ee73c5e1684e1e | 7c5058772f3296f346460b4d47195c2f4696b380 | refs/heads/master | 2022-12-24T01:33:23.704770 | 2022-12-15T23:01:09 | 2022-12-15T23:01:15 | 17,381,717 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | import os, glob
import fitsio
# Configuration: location and tag of the MEDS files to scan.
meds_dir = '/astro/u/mjarvis/DES/meds'
meds_tag = 'y3v02a'
dirs = [ os.path.join(meds_dir, meds_tag) ]
outfile = meds_tag
# DES photometric bands to process (uncomment the line below to test one band).
bands = ['g','r','i','z','Y']
#bands = ['r']
all_exps = set()
for b in bands:
    # One output exposure-list file per band.
    out_b = outfile + '_' + b
    print out_b
    exps = set()
    for dir in dirs:
        # Match every MEDS file for this band under each tile directory.
        pat = os.path.join(dir,'*','*_'+b+'_meds-*.fits*')
        print pat
        for srclist in sorted(glob.glob(pat)):
            print srclist
            with fitsio.FITS(srclist) as f:
                files = f['image_info']['image_path'][:]
            for file in files:
                # Skip the coadd image; only single-epoch exposures count.
                if 'coadd' in file: continue
                # file name looks like /somedir/D$exp_$band_c$ccd_r$run_immasked.fits
                file_name = file.split('/')[-1]
                tokens = file_name.split('_')
                exp = tokens[0][1:]
                band = tokens[1]
                ccd = tokens[2][1:]
                run = tokens[3][1:]
                print(exp,band,ccd,run)
                # Store exposure numbers normalized (leading zeros stripped).
                exps.add(str(int(exp)))
                all_exps.add(str(int(exp)))
    # Write the sorted exposure list for this band, one number per line.
    with open(out_b,'w') as out:
        for exp in sorted(list(exps)):
            out.write(exp + '\n')
# Combined exposure list over all bands.
out_all = outfile + '_' + ''.join(bands)
with open(out_all,'w') as out:
    for exp in sorted(list(all_exps)):
        out.write(exp + '\n')
| [
"michael@jarvis.net"
] | michael@jarvis.net |
744bba4df8fcce97608429295791d820e9424467 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /120_design_patterns/020_state/_exercises/templates/state_001.py | 2ef77ec310d0aeddf3816f1a9838370e70966df9 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,945 | py | # #!/usr/bin/env python
# # -*- coding: utf-8 -*-
#
# """
# Implementation of the state pattern
#
# http://ginstrom.com/scribbles/2007/10/08/design-patterns-python-style/
#
# *TL;DR80
# Implements state as a derived class of the state pattern interface.
# Implements state transitions by invoking methods from the pattern's superclass.
# """
#
# ____ -f ______ p....
#
#
# c_ State o..
#
# """Base state. This is to share functionality"""
#
# ___ scan
# """Scan the dial to the next station"""
# pos +_ 1
# __ ? __ le. s..
# ? _ 0
# print(u"Scanning... Station is @ @"
# (s.. |? n..
#
#
# class AmState S...
#
# ___ - radio
# ? ?
# stations _ ["1250", "1380", "1510"]
# pos _ 0
# name _ "AM"
#
# ___ toggle_amfm
# print(u"Switching to FM")
# r___.st.. _ r___.fm...
#
#
# c_ FmState S..
#
# ___ - radio
# ? ?
# stations _ ["81.3", "89.1", "103.9"]
# pos _ 0
# name _ "FM"
#
# ___ toggle_amfm
# print(u"Switching to AM")
# r___.st.. _ r___.am..
#
#
# c_ Radio o..
#
# """A radio. It has a scan button, and an AM/FM toggle switch."""
#
# ___ -
# """We have an AM state and an FM state"""
# amstate _ ? ?
# fmstate _ ? ?
# state _ a...
#
# ___ toggle_amfm
# s___.t_a..
#
# ___ scan
# s__.s..
#
#
# # Test our radio out
# __ ________ __ _______
# radio _ ?
# actions _ |?.s..| * 2 + |?.t_a.. + |?.s.. * 2
# ? *_ 2
#
# ___ action __ ?
# ?
#
# ### OUTPUT ###
# # Scanning... Station is 1380 AM
# # Scanning... Station is 1510 AM
# # Switching to FM
# # Scanning... Station is 89.1 FM
# # Scanning... Station is 103.9 FM
# # Scanning... Station is 81.3 FM
# # Scanning... Station is 89.1 FM
# # Switching to AM
# # Scanning... Station is 1250 AM
# # Scanning... Station is 1380 AM
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
c9833de285b7a6a60acf23bd82fa802ffbfefdb8 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/1870.py | 09a82b95cc01c7f5d3208fd0e8a993c6282edf71 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | #!/usr/bin/python -tt
import sys
import math
#Parse datas
def parseInput(fileName):
    """Parse a Code Jam input file.

    The first line holds the number of test cases; each following line
    holds a "min max" pair.  Returns a list whose first element is the
    case count (int), followed by one [min, max] pair of strings per case.
    """
    # 'rU' (universal newlines) was removed in Python 3.11; plain 'r' is
    # equivalent, and the context manager guarantees the file is closed.
    with open(fileName, 'r') as f:
        contentFile = f.readlines()
    nbCase = int(contentFile[0])
    testCaseList = [nbCase]
    for i in range(nbCase):
        # Strip the newline and split the "min max" pair.
        minMax = contentFile[i + 1].replace('\n', '').split(' ')
        testCaseList.append(minMax)
    return testCaseList
def isPalindrome(value):
    """Return True when the decimal form of *value* reads the same both ways."""
    digits = str(value)
    reversed_digits = ''.join(reversed(digits))
    return digits == reversed_digits
def isFairAndQSquare(value):
    """Return True when *value* and its square are both palindromes."""
    if not isPalindrome(value):
        return False
    return isPalindrome(value ** 2)
def countFairAndSquare(strMin, strMax):
    """Count fair-and-square numbers inside [strMin, strMax].

    A fair-and-square number is the square of a palindromic root where
    the square is itself a palindrome, so only the candidate roots from
    ceil(sqrt(min)) through floor(sqrt(max)) need to be examined.
    """
    lowest_root = int(math.ceil(math.sqrt(int(strMin))))
    highest_root = int(math.floor(math.sqrt(int(strMax))))
    return sum(1 for root in range(lowest_root, highest_root + 1)
               if isFairAndQSquare(root))
def testAllCases(testCaseList):
    """Solve every parsed case and return the list of counts.

    NOTE: pops the leading case count off *testCaseList* (mutating it),
    then indexes the remaining [min, max] pairs positionally.
    """
    case_count = int(testCaseList.pop(0))
    return [countFairAndSquare(testCaseList[i][0], testCaseList[i][1])
            for i in range(case_count)]
# Generate output file
def generateOutputFile(fileName, results):
    """Write Code Jam style output: one "Case #i: result" line per case.

    Uses a context manager so the file is closed even if a write fails
    (the original leaked the handle on error), and enumerate() instead
    of a manually maintained counter.
    """
    with open(fileName, 'w') as f:
        for i, result in enumerate(results, start=1):
            f.write('Case #' + str(i) + ': ' + str(result) + '\n')
# Define a main() function
def main():
    """Command-line entry point: parse input file, solve, write output.

    Usage: script <sourceFile> <outputFile>.  Exits with status 1 when
    the argument count is wrong.
    """
    args = sys.argv[1:]
    if len(args) != 2:
        # Parenthesised single-argument print works under both Python 2
        # and Python 3; the original bare print statement is a
        # SyntaxError on Python 3.
        print('usage: <sourceFile> <outputFile>')
        sys.exit(1)
    testCaseList = parseInput(args[0])
    results = testAllCases(testCaseList)
    generateOutputFile(args[1], results)
# Standard boilerplate: run main() only when executed as a script,
# not when imported as a module.
if __name__ == '__main__':
    main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c2a74e0eb8cecf41cf7e626855428a1f68f28092 | 5f845e784106ad48b3b0dddb3016eeafa9f4810b | /test/travis_tests.py | fb47dab4028a5ba12fab40fb9e42f7a541a326f9 | [] | no_license | robertej19/utils | d787a3d93e8be59b84ffe5f01fb1ca5b5e028604 | a78d32f257a71db6c3ca0dcc8b5db46466c049b0 | refs/heads/master | 2023-06-03T05:01:47.277087 | 2021-06-24T14:47:26 | 2021-06-24T14:47:26 | 265,590,605 | 1 | 0 | null | 2020-05-20T14:26:02 | 2020-05-20T14:26:01 | null | UTF-8 | Python | false | false | 2,154 | py | import subprocess
import os
import readtests
def test_function(command):
process = subprocess.Popen(command.command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if command.expect_out != '0':
if stdout == command.expect_out:
return(stdout,stderr)
else:
err_mess = str(stderr) + "unexpected sdtout of: " + str(stdout)
return(stdout, err_mess)
else:
return(stdout,stderr)
# Scratch directory where the integration test will run.
test_folder= os.path.dirname(os.path.abspath(__file__))+'/clas12-test'
if os.path.isdir(test_folder):
    print('removing previous database file')
    subprocess.call(['rm','-rf',test_folder])
# NOTE(review): this second check repeats the removal; after the rm -rf
# above the else branch is the one that normally runs.
if os.path.isdir(test_folder):
    print('removing previous database file')
    subprocess.call(['rm','-rf',test_folder])
else:
    print(test_folder+" is not present, not deleteing")
subprocess.call(['mkdir','-p',test_folder])
print(test_folder+" is now present")
#abspath = os.path.abspath(__file__)
#dname = os.path.dirname(abspath)+'/clas12-test'
os.chdir(test_folder)
# Write msqlrw.txt ("root" + blank second line) -- presumably the DB
# credentials consumed by the test commands; confirm against readtests.
f = open('msqlrw.txt',"w")
f.write("root\n")
f.write(" ")
f.close()
# Clone any sibling repositories that are missing next to this script.
folders = ['utils','server','client']
for folder in folders:
    folder_name= os.path.dirname(os.path.abspath(__file__))+'/'+folder
    if not os.path.isdir(folder_name):
        print('{0} not found, cloning from github'.format(folder))
        substring = 'https://github.com/robertej19/{0}.git'.format(folder)
        subprocess.call(['git','clone',substring])
# Remove any stale database file left over from a previous run.
filename = os.path.dirname(os.path.abspath(__file__))+'/utils/CLAS12OCR.db'
if os.path.isfile(filename):
    print('removing previous database file')
    subprocess.call(['rm',filename])
command_sequence = readtests.create_commands()
def run_through_tests(command_sequence):
    """Execute every command, print a pass/fail report per command, and
    return the total number of failures."""
    failures = 0
    for command in command_sequence:
        out, err = test_function(command)
        print('Testing command: {0}'.format(command.name))
        if err:
            print(out)
            print('... fail, error message:')
            print(err)
            failures += 1
        else:
            print('... success')
    return failures
# Run all tests and exit non-zero when any failed, so CI (Travis)
# marks the build accordingly.
status = run_through_tests(command_sequence)
if status > 0:
    exit(1)
else:
    exit(0)
"""
#which condor_submit if val = 0, do not submit, print not found message
"""
| [
"robertej@mit.edu"
] | robertej@mit.edu |
b55a268e68090b5bca90142d3bcd799a5f5f42d1 | e06909502244ce808673b5a6d951f9cebbe1b638 | /CW2/ex4.py | a0f4a40ce527e8ca7f910bbf53c4f1f2df06932c | [] | no_license | tasneemab/RailsProject | a7bfb00c6f50d3729ece9964e648198417da3236 | 24786ecf3f3b7e88bc1cb0b199c7da6f6d32ddd3 | refs/heads/master | 2020-07-30T06:59:57.322791 | 2019-09-25T13:29:36 | 2019-09-25T13:29:36 | 210,126,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | from datetime import datetime
#not finished yet!!
def report(f1, f2):
    """Read every line of *f1* and (eventually) write a report to *f2*.

    Still unfinished (see the note above): currently it only reads the
    input and prepares an empty per-day accumulator.
    """
    data = f1.readlines()
    # Bug fix: 'days = dict' bound the dict *class*, not an instance;
    # an empty dict is what the accumulator needs.
    days = {}
def main():
    """Open the input/output files and delegate to report().

    Uses context managers so both handles are closed even on error
    (the original left both files open).  The 'r+'/'w+' modes are
    preserved unchanged.
    """
    with open('input.txt', 'r+') as f1, open('output.txt', 'w+') as f2:
        report(f1, f2)
# Run the entry point only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"tasneim.ao@gmail.com"
] | tasneim.ao@gmail.com |
29a153005e191440645425666fbd7fd33511a99a | 2d5a972218be09f4f544cc6ef4ff21b94e2b4823 | /magenta/music/mfcc_mel_test.py | a3d83f1071a6a9a52c68a7babc18f07a1c63467f | [
"Apache-2.0"
] | permissive | uchiiii/magenta | e13ef3b63a36dbf08077d95c9eb9c0a3deece25f | 276f372f2c4d56ce3c59fed8ca40e59a2d33c19f | refs/heads/master | 2022-10-02T15:40:12.059034 | 2020-06-05T19:03:44 | 2020-06-05T19:04:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,970 | py | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mfcc_mel."""
from absl.testing import absltest
from magenta.music import mfcc_mel
import numpy as np
class MfccMelTest(absltest.TestCase):
  """Tests for mfcc_mel's spectrogram-to-mel conversion matrix."""

  def testMelSpectrumAgreesWithGoldenValues(self):
    # Parallel dsp/mfcc:mel_spectrum_test.
    sample_count = 513
    # One spectrogram frame: sqrt(1..513) shaped as a (1, 513) row vector.
    input_ = np.sqrt(np.arange(1, sample_count + 1))[np.newaxis, :]
    spec_to_mel_matrix = mfcc_mel.SpectrogramToMelMatrix(
        num_spectrogram_bins=sample_count,
        audio_sample_rate=22050,
        num_mel_bins=20,
        lower_edge_hertz=20.0,
        upper_edge_hertz=4000.0)
    mel_spectrum = np.dot(input_, spec_to_mel_matrix)
    # Golden values taken from the parallel dsp/mfcc mel_spectrum_test.
    expected = np.array(
        [7.422619, 10.30330648, 13.72703292, 17.24158686, 21.35253118,
         25.77781089, 31.30624108, 37.05877236, 43.9436536, 51.80306637,
         60.79867148, 71.14363376, 82.90910141, 96.50069158, 112.08428368,
         129.96721968, 150.4277597, 173.74997634, 200.86037462, 231.59802942])
    np.testing.assert_array_almost_equal(expected, mel_spectrum[0, :])

  def testSpectrogramToMelMatrixChecksFrequencyBounds(self):
    # Lower edge must be >= 0, but 0 is OK.
    mfcc_mel.SpectrogramToMelMatrix(
        num_spectrogram_bins=513,
        audio_sample_rate=22050,
        num_mel_bins=20,
        lower_edge_hertz=0.0,
        upper_edge_hertz=4000.0)
    with self.assertRaises(ValueError):
      mfcc_mel.SpectrogramToMelMatrix(
          num_spectrogram_bins=513,
          audio_sample_rate=22050,
          num_mel_bins=20,
          lower_edge_hertz=-1.0,
          upper_edge_hertz=4000.0)
    # Upper edge must be <= Nyquist, but Nyquist is OK.
    mfcc_mel.SpectrogramToMelMatrix(
        num_spectrogram_bins=513,
        audio_sample_rate=22050,
        num_mel_bins=20,
        lower_edge_hertz=20.0,
        upper_edge_hertz=11025.0)
    with self.assertRaises(ValueError):
      mfcc_mel.SpectrogramToMelMatrix(
          num_spectrogram_bins=513,
          audio_sample_rate=22050,
          num_mel_bins=20,
          lower_edge_hertz=20.0,
          upper_edge_hertz=16000.0)
    # Must be a positive gap between edges.
    with self.assertRaises(ValueError):
      mfcc_mel.SpectrogramToMelMatrix(
          num_spectrogram_bins=513,
          audio_sample_rate=22050,
          num_mel_bins=20,
          lower_edge_hertz=20.0,
          upper_edge_hertz=20.0)
# Delegate to absl's test runner when executed directly.
if __name__ == "__main__":
  absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
76285abcdd962627566757918feb0b26222b3b5b | 29a4c1e436bc90deaaf7711e468154597fc379b7 | /modules/predicates/doc/is_even.py | 4c5640849c00aa82ee0b977898da031df8ea24f5 | [
"BSL-1.0"
] | permissive | brycelelbach/nt2 | 31bdde2338ebcaa24bb76f542bd0778a620f8e7c | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | refs/heads/master | 2021-01-17T12:41:35.021457 | 2011-04-03T17:37:15 | 2011-04-03T17:37:15 | 1,263,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,984 | py | [ ## this file was manually modified by jt
{
'functor' : {
'arity' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'typename nt2::meta::logical<T>::type',
},
'simd_types' : ['real_'],
'special' : ['predicate'],
'type_defs' : [],
'types' : ['real_', 'signed_int_', 'unsigned_int_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'created by jt the 21/02/2011',
'included' : ['#include <nt2/sdk/meta/logical.hpp>'],
'no_ulp' : 'True',
'notes' : [],
'stamp' : 'modified by jt the 21/02/2011',
},
'ranges' : {
'default' : [['T(-10000)', 'T(10000)']],
},
'specific_values' : {
'default' : {
'nt2::One<T>()' : {'result' : 'nt2::False<r_t>()','ulp_thresh' : '0.5',},
'nt2::Two<T>()' : {'result' : 'nt2::True<r_t>()','ulp_thresh' : '0.5',},
'nt2::Zero<T>()' : {'result' : 'nt2::True<r_t>()','ulp_thresh' : '0.5',},
},
'real_' : {
'-nt2::Zero<T>()' : {'result' : 'nt2::True<r_t>()','ulp_thresh' : '0.5',},
'nt2::Half<T>()' : {'result' : 'nt2::False<r_t>()','ulp_thresh' : '0.5',},
'nt2::Inf<T>()' : {'result' : 'nt2::False<r_t>()','ulp_thresh' : '0.5',},
'nt2::Minf<T>()' : {'result' : 'nt2::False<r_t>()','ulp_thresh' : '0.5',},
'nt2::Mone<T>()' : {'result' : 'nt2::False<r_t>()','ulp_thresh' : '0.5',},
'nt2::Nan<T>()' : {'result' : 'nt2::False<r_t>()','ulp_thresh' : '0.5',},
'nt2::One<T>()' : {'result' : 'nt2::False<r_t>()','ulp_thresh' : '0.5',},
'nt2::Quarter<T>()' : {'result' : 'nt2::False<r_t>()','ulp_thresh' : '0.5',},
'nt2::Two<T>()' : {'result' : 'nt2::True<r_t>()','ulp_thresh' : '0.5',},
'nt2::Zero<T>()' : {'result' : 'nt2::True<r_t>()','ulp_thresh' : '0.5',},
},
'signed_int_' : {
'nt2::Mone<T>()' : {'result' : 'nt2::False<r_t>()','ulp_thresh' : '0.5',},
'nt2::One<T>()' : {'result' : 'nt2::False<r_t>()','ulp_thresh' : '0.5',},
'nt2::Two<T>()' : {'result' : 'nt2::True<r_t>()','ulp_thresh' : '0.5',},
'nt2::Zero<T>()' : {'result' : 'nt2::True<r_t>()','ulp_thresh' : '0.5',},
},
},
'verif_test' : {
'property_call' : {
'default' : ['nt2::is_even(a0)'],
},
'property_value' : {
'default' : ['nt2::is_even(a0)'],
},
'simd' : {
},
'ulp_thresh' : {
'default' : ['0'],
},
},
},
},
] | [
"jtlapreste@gmail.com"
] | jtlapreste@gmail.com |
fdd255fd0401fa2efcab14ecd4062eb704b3fb6d | f3e51466d00510f1dae58f1cb87dd53244ce4e70 | /LeetCodes/facebook/Valid Number.py | 72d2663600ba19a8b6eb08fd10c011ab40ece041 | [] | no_license | chutianwen/LeetCodes | 40d18e7aa270f8235342f0485bfda2bd1ed960e1 | 11d6bf2ba7b50c07e048df37c4e05c8f46b92241 | refs/heads/master | 2022-08-27T10:28:16.594258 | 2022-07-24T21:23:56 | 2022-07-24T21:23:56 | 96,836,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | '''
We use three flags: met_dot, met_e, met_digit, mark if we have met ., e or any digit so far. First we strip the string, then go through each char and make sure:
If char == + or char == -, then prev char (if there is) must be e
. cannot appear twice or after e
e cannot appear twice, and there must be at least one digit before and after e
All other non-digit char is invalid
'''
class Solution(object):
    def isNumber(self, s):
        """Return True if *s* (after stripping whitespace) is a valid number.

        A single pass tracks three flags: whether a dot, an exponent 'e',
        and at least one digit have been seen.  Rules enforced:

        * '+'/'-' may only appear at the start or directly after 'e';
        * '.' may appear at most once and never after 'e';
        * 'e' may appear at most once, requires a digit before it, and
          resets the digit flag so a digit is also required after it;
        * any other non-digit character is invalid.

        :type s: str
        :rtype: bool
        """
        text = s.strip()
        seen_dot = False
        seen_exp = False
        seen_digit = False
        for idx, ch in enumerate(text):
            if ch.isdigit():
                seen_digit = True
            elif ch == '+' or ch == '-':
                # A sign is only legal at position 0 or right after 'e'.
                if idx > 0 and text[idx - 1] != 'e':
                    return False
            elif ch == '.':
                if seen_dot or seen_exp:
                    return False
                seen_dot = True
            elif ch == 'e':
                if seen_exp or not seen_digit:
                    return False
                seen_exp = True
                seen_digit = False  # require at least one digit after 'e'
            else:
                return False
        return seen_digit
"tianwen.chu@fedcentric.com"
] | tianwen.chu@fedcentric.com |
647c779b66ed9d8442e9da4c93c82e6ba1de6079 | b500b05ae5cbd3be12a6308baccc61a3f5fdbc81 | /adafruit_ble_adafruit/adafruit_service.py | 274293a5cb9f7e8a384b2a6c22de2baf530590c5 | [
"MIT"
] | permissive | dglaude/Adafruit_CircuitPython_BLE_Adafruit | fb54a1898d4c3bffcc6fdc87fbc115d7db3bda36 | ecd36c4f747cbb729fda57e79b2d1489d903cd2d | refs/heads/master | 2022-12-11T22:11:33.174069 | 2020-08-17T19:13:36 | 2020-08-17T19:13:36 | 291,056,354 | 0 | 0 | null | 2020-08-28T13:40:19 | 2020-08-28T13:40:18 | null | UTF-8 | Python | false | false | 4,412 | py | # The MIT License (MIT)
#
# Copyright (c) 2020 Dan Halbert for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_ble_adafruit.adafruit_service`
================================================================================
Access to sensors and hardware on or connected to BLE-capable boards.
* Author(s): Dan Halbert
Implementation Notes
--------------------
**Hardware:**
* `Adafruit CircuitPlayground Bluefruit <https://www.adafruit.com/product/4333>`_
* `Adafruit CLUE nRF52840 Express <https://www.adafruit.com/product/4500>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_Adafruit.git"
import struct
from micropython import const
from adafruit_ble.advertising import Advertisement, LazyObjectField
from adafruit_ble.advertising.standard import ManufacturerData, ManufacturerDataField
from adafruit_ble.attributes import Attribute
from adafruit_ble.characteristics import Characteristic
from adafruit_ble.characteristics.int import Int32Characteristic, Uint32Characteristic
from adafruit_ble.uuid import VendorUUID
from adafruit_ble.services import Service
_MANUFACTURING_DATA_ADT = const(0xFF)
_ADAFRUIT_COMPANY_ID = const(0x0822)
_PID_DATA_ID = const(0x0001) # This is the same as the Radio data id, unfortunately.
class AdafruitServerAdvertisement(Advertisement):
    """Advertise the Adafruit company ID and the board USB PID.
    """

    # Match advertisements whose manufacturer-data structure begins with
    # the Adafruit company ID followed by the PID data id.
    match_prefixes = (
        struct.pack(
            "<BHBH",
            _MANUFACTURING_DATA_ADT,
            _ADAFRUIT_COMPANY_ID,
            struct.calcsize("<HH"),
            _PID_DATA_ID,
        ),
    )
    # Manufacturer-specific AD structure, keyed by 16-bit little-endian ids.
    manufacturer_data = LazyObjectField(
        ManufacturerData,
        "manufacturer_data",
        advertising_data_type=_MANUFACTURING_DATA_ADT,
        company_id=_ADAFRUIT_COMPANY_ID,
        key_encoding="<H",
    )
    pid = ManufacturerDataField(_PID_DATA_ID, "<H")
    """The USB PID (product id) for this board."""

    def __init__(self):
        super().__init__()
        # Advertise as a connectable, generally discoverable, BLE-only device.
        self.connectable = True
        self.flags.general_discovery = True
        self.flags.le_only = True
class AdafruitService(Service):
    """Common superclass for all Adafruit board services."""

    @staticmethod
    def adafruit_service_uuid(n):
        """Generate a VendorUUID which fills in a 16-bit value in the standard
        Adafruit Service UUID: ADAFnnnn-C332-42A8-93BD-25E905756CB8.
        """
        return VendorUUID("ADAF%04x-C332-42A8-93BD-25E905756CB8" % n)

    @classmethod
    def measurement_period_charac(cls, msecs=1000):
        """Create a measurement_period Characteristic for use by a subclass."""
        period_uuid = cls.adafruit_service_uuid(0x0001)
        return Int32Characteristic(
            uuid=period_uuid,
            initial_value=msecs,
            properties=(Characteristic.READ | Characteristic.WRITE),
        )

    @classmethod
    def service_version_charac(cls, version=1):
        """Create a service_version Characteristic for use by a subclass."""
        version_uuid = cls.adafruit_service_uuid(0x0002)
        return Uint32Characteristic(
            uuid=version_uuid,
            initial_value=version,
            properties=Characteristic.READ,
            write_perm=Attribute.NO_ACCESS,
        )
| [
"halbert@halwitz.org"
] | halbert@halwitz.org |
fe75249c1452e36df7494cb510a154a856ca29ba | ecb50d1628ed70d3d5c041c091e3c41bbecdc758 | /node_scripts/edgetpu_human_pose_estimator.py | 1c5d07c1f16c1fe8a9cb868c0dc754d91f8be007 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | geometrikal/coral_usb_ros | 67f0be90fe0566996cede5543d7b9108c8d90347 | 46341a30ec91d887f631353f1e7b26680d75e8d1 | refs/heads/master | 2022-11-06T16:34:50.426617 | 2020-07-03T04:43:23 | 2020-07-03T04:43:23 | 275,991,331 | 0 | 0 | NOASSERTION | 2020-06-30T03:51:22 | 2020-06-30T03:51:22 | null | UTF-8 | Python | false | false | 5,004 | py | #!/usr/bin/env python
import matplotlib
matplotlib.use("Agg") # NOQA
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# OpenCV import for python3.5
sys.path.remove('/opt/ros/{}/lib/python2.7/dist-packages'.format(os.getenv('ROS_DISTRO'))) # NOQA
import cv2 # NOQA
sys.path.append('/opt/ros/{}/lib/python2.7/dist-packages'.format(os.getenv('ROS_DISTRO'))) # NOQA
from chainercv.visualizations import vis_point
from cv_bridge import CvBridge
import rospkg
import rospy
from dynamic_reconfigure.server import Server
from geometry_msgs.msg import Point
from geometry_msgs.msg import Pose
from jsk_recognition_msgs.msg import PeoplePose
from jsk_recognition_msgs.msg import PeoplePoseArray
from jsk_topic_tools import ConnectionBasedTransport
from sensor_msgs.msg import Image
from coral_usb.cfg import EdgeTPUHumanPoseEstimatorConfig
from coral_usb import PoseEngine
class EdgeTPUHumanPoseEstimator(ConnectionBasedTransport):
    """ROS node running PoseNet human pose estimation on a Coral Edge TPU.

    Subscribes to a raw image topic and publishes:

    * ``~output/poses`` -- jsk ``PeoplePoseArray`` with per-joint poses;
    * ``~output/image`` -- a visualization image, rendered only while
      someone is subscribed to it.

    Score thresholds are adjustable at runtime via dynamic_reconfigure.
    """

    def __init__(self):
        super(EdgeTPUHumanPoseEstimator, self).__init__()
        rospack = rospkg.RosPack()
        pkg_path = rospack.get_path('coral_usb')
        self.bridge = CvBridge()
        self.classifier_name = rospy.get_param(
            '~classifier_name', rospy.get_name())
        # Default model shipped with the package; overridable via ~model_file.
        model_file = os.path.join(
            pkg_path,
            './python/coral_usb/posenet/models/'
            'posenet_mobilenet_v1_075_481_641_quant_decoder_edgetpu.tflite')
        model_file = rospy.get_param('~model_file', model_file)
        self.engine = PoseEngine(model_file, mirror=False)
        # Incoming images are resized to the network's fixed input size.
        self.resized_H = self.engine.image_height
        self.resized_W = self.engine.image_width

        # dynamic reconfigure
        self.srv = Server(
            EdgeTPUHumanPoseEstimatorConfig, self.config_callback)

        self.pub_pose = self.advertise(
            '~output/poses', PeoplePoseArray, queue_size=1)
        self.pub_image = self.advertise(
            '~output/image', Image, queue_size=1)

    def subscribe(self):
        """Start listening to the input topic (ConnectionBasedTransport hook)."""
        self.sub_image = rospy.Subscriber(
            '~input', Image, self.image_cb, queue_size=1, buff_size=2**26)

    def unsubscribe(self):
        """Stop listening to the input topic."""
        self.sub_image.unregister()

    @property
    def visualize(self):
        # Only render the (expensive) matplotlib overlay while someone is
        # actually subscribed to the visualization topic.
        return self.pub_image.get_num_connections() > 0

    def config_callback(self, config, level):
        """dynamic_reconfigure callback: update the detection thresholds."""
        self.score_thresh = config.score_thresh
        self.joint_score_thresh = config.joint_score_thresh
        return config

    def image_cb(self, msg):
        """Run pose estimation on one image and publish the results.

        Keypoint coordinates are mapped back from the network input
        resolution to the original image resolution before publishing.
        """
        img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='rgb8')
        resized_img = cv2.resize(img, (self.resized_W, self.resized_H))
        H, W, _ = img.shape
        y_scale = self.resized_H / H
        x_scale = self.resized_W / W
        poses, _ = self.engine.DetectPosesInImage(resized_img.astype(np.uint8))

        poses_msg = PeoplePoseArray()
        poses_msg.header = msg.header
        points = []
        visibles = []
        for pose in poses:
            if pose.score < self.score_thresh:
                continue
            pose_msg = PeoplePose()
            point = []
            visible = []
            for lbl, keypoint in pose.keypoints.items():
                resized_key_y, resized_key_x = keypoint.yx
                # Map back to the original image scale.
                key_y = resized_key_y / y_scale
                key_x = resized_key_x / x_scale
                point.append((key_y, key_x))
                if keypoint.score < self.joint_score_thresh:
                    # Keep the point for visualization bookkeeping but mark
                    # it invisible and leave it out of the published message.
                    visible.append(False)
                    continue
                pose_msg.limb_names.append(lbl)
                pose_msg.scores.append(keypoint.score)
                pose_msg.poses.append(
                    Pose(position=Point(x=key_x, y=key_y)))
                visible.append(True)
            poses_msg.poses.append(pose_msg)
            points.append(point)
            visibles.append(visible)

        self.pub_pose.publish(poses_msg)

        points = np.array(points, dtype=np.int32)
        # np.bool was removed in NumPy 1.24; the builtin bool is the
        # correct dtype here.
        visibles = np.array(visibles, dtype=bool)
        if self.visualize:
            fig = plt.figure(
                tight_layout={'pad': 0})
            ax = plt.Axes(fig, [0., 0., 1., 1.])
            ax.axis('off')
            fig.add_axes(ax)
            vis_point(img.transpose((2, 0, 1)), points, visibles, ax=ax)
            fig.canvas.draw()
            w, h = fig.canvas.get_width_height()
            # np.fromstring on binary data is deprecated; np.frombuffer is
            # the zero-copy replacement, reshaped to (H, W, RGB).
            vis_img = np.frombuffer(
                fig.canvas.tostring_rgb(), dtype=np.uint8)
            vis_img = vis_img.reshape((h, w, 3))
            fig.clf()
            plt.close()
            vis_msg = self.bridge.cv2_to_imgmsg(vis_img, 'rgb8')
            # BUG: https://answers.ros.org/question/316362/sensor_msgsimage-generates-float-instead-of-int-with-python3/ # NOQA
            vis_msg.step = int(vis_msg.step)
            vis_msg.header = msg.header
            self.pub_image.publish(vis_msg)
if __name__ == '__main__':
    # Register the node, construct the estimator, and block until shutdown.
    rospy.init_node('edgetpu_human_pose_estimator')
    estimator = EdgeTPUHumanPoseEstimator()
    rospy.spin()
| [
"shingogo@hotmail.co.jp"
] | shingogo@hotmail.co.jp |
5e836e61951a7039eb57b3750d56a0af699f18e0 | 3222a9f99dc04eae93442f27a49259f2c74320a9 | /uninas/optimization/hpo/pymoo/result.py | 47419a410e3913b9d745d1848e1f86a11b62b3fa | [
"MIT"
] | permissive | Light-Reflection/uninas | 55a754c997b6d7a48aa6ee93fc4f1211c9a35740 | b5727308b48b2fe399cc9b5d5732f3f5fd913b35 | refs/heads/main | 2023-02-20T11:48:06.815664 | 2021-01-25T09:42:43 | 2021-01-25T09:42:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,557 | py | import numpy as np
import matplotlib.pyplot as plt
from pymoo.performance_indicator.hv import Hypervolume
from pymoo.model.result import Result
from pymoo.optimize import minimize
from uninas.utils.loggers.python import Logger, log_in_columns
class SingleResult:
    """A single candidate solution taken from a pymoo optimization result."""

    def __init__(self, x: np.array, f: np.array, g: np.array, cv: np.array):
        # Keep references to the raw pymoo arrays; nothing is copied.
        self.x = x    # design variables (the "gene")
        self.f = f    # objective values
        self.g = g    # constraint values
        self.cv = cv  # aggregated constraint violation
class PymooResultWrapper:
    """Convenience wrapper around a pymoo ``Result``.

    Adds sorting, logging and plotting helpers; the plotting helpers assume a
    bi-objective problem (they read F[0] and F[1]).
    """

    def __init__(self, result: Result):
        self.result = result

    @classmethod
    def minimize(cls, *args, **kwargs):
        """Run ``pymoo.optimize.minimize`` and wrap its result."""
        result = minimize(*args, **kwargs)
        return cls(result)

    def sorted_best(self, reverse=False) -> list:
        """Final candidates as SingleResult objects, sorted by the first objective.

        The original annotated this ``-> [SingleResult]``, which builds a list
        at definition time instead of naming a type; ``list`` is used instead.
        """
        best = [SingleResult(x, f, g, cv)
                for x, f, g, cv in zip(self.result.X, self.result.F, self.result.G, self.result.CV)]
        return sorted(best, reverse=reverse, key=lambda sr: sr.f[0])

    def n_eval_by_iteration(self) -> list:
        """Cumulative number of function evaluations after each iteration."""
        return [a.evaluator.n_eval for a in self.result.history]

    def population_by_iteration(self) -> list:
        """The full population recorded for every iteration."""
        return [a.pop for a in self.result.history]

    def feasible_values_by_iteration(self) -> list:
        """Objective values ("F") of only the feasible individuals, per iteration."""
        pops = self.population_by_iteration()
        return [p[p.get("feasible")[:, 0]].get("F") for p in pops]

    def feasible_population_by_iteration(self) -> list:
        """Only the feasible individuals, per iteration."""
        pops = self.population_by_iteration()
        return [p[p.get("feasible")[:, 0]] for p in pops]

    def log_best(self, logger: Logger):
        """Log a table of the best candidates (objectives in their natural sign)."""
        signs = self.result.problem.objective_signs(only_minimize=True)
        rows = [['', 'gene'] + self.result.problem.objective_labels(only_minimize=False)]
        logger.info("best candidates:")
        for i, sr in enumerate(self.sorted_best()):
            rows.append([i, sr.x] + [v for v in sr.f*signs])
        log_in_columns(logger, rows, add_bullets=True)

    def plot_all_f(self, checkpoint_dir: str, name='fa'):
        """Scatter-plot all feasible individuals over time, colored by iteration."""
        plt.clf()
        ax = plt.gca()
        cmap = plt.get_cmap('plasma')
        population_by_iteration = self.feasible_population_by_iteration()
        labels = self.result.problem.objective_labels(only_minimize=False)
        signs = self.result.problem.objective_signs(only_minimize=True)
        for i, population in enumerate(population_by_iteration):
            x, y = [], []
            for ind in population:
                x.append(ind.F[0] * signs[0])
                y.append(ind.F[1] * signs[1])
            ax.scatter(x, y, label='__no_legend__', s=16,
                       c=[i]*len(x), cmap=cmap, vmin=0, vmax=len(population_by_iteration)-1)
        plt.plot(*self.result.problem.plottable_pareto_front(), color="black", alpha=0.7)
        plt.xlabel(labels[0])
        plt.ylabel(labels[1])
        try:
            # Attaching the color bar can fail (e.g. nothing was plotted, so
            # ax.get_children()[2] is not a mappable); the plot is still
            # useful without it, so continue.  Narrowed from a bare except.
            col_bar = plt.colorbar(ax.get_children()[2], ax=ax)
            col_bar.set_label('iterations')
        except Exception:
            pass
        plt.savefig('%s/%s.pdf' % (checkpoint_dir, name))

    def plot_hv(self, checkpoint_dir: str, name='hv'):
        """Plot the hyper-volume of the feasible set against function evaluations."""
        metric = Hypervolume(ref_point=self.result.problem.get_ref_point())
        x = self.n_eval_by_iteration()
        hv = [metric.calc(f) for f in self.feasible_values_by_iteration()]
        plt.clf()
        plt.plot(x, hv, '-o')
        plt.xlabel("Function Evaluations")
        plt.ylabel("Hyper-Volume")
        plt.savefig('%s/%s.pdf' % (checkpoint_dir, name))
| [
"kevin.laube@uni-tuebingen.de"
] | kevin.laube@uni-tuebingen.de |
15c2595fa02234f622223f3712ecc3c110661343 | 294a88c940d95b8810f5387f720d9ce006bb68c8 | /获取请求参数.py | 35d1c87dc5350516321f619b40be957242828231 | [] | no_license | KingsleyDeng/PythonFlask | 5bbd484cfbbe25854e112fd5e6ba89b02d472ee2 | e5d35173636334500820d100e11c7e73a167dab4 | refs/heads/master | 2021-03-15T02:30:28.077495 | 2020-04-16T09:19:30 | 2020-04-16T09:19:30 | 246,817,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | # coding utf-8
# __author__:KingsleyDeng
from flask import Flask, request
# Module-level WSGI application; the route decorators below register on it.
app = Flask(__name__)
@app.route("/indexs", methods=["GET", "POST"])
def index():
    """Demonstrate reading request parameters in Flask."""
    # Body parameters: request.form holds the parsed form-encoded body.
    # get() returns only the first value of a repeated field.
    name = request.form.get("name")
    age = request.form.get("age")
    # Raw, unparsed request body, printed for reference.
    print("request.data: %s" % request.data)
    # Query-string parameters come from request.args.
    city = request.args.get("city")
    return "hello name=%s, age=%s, city=%s" % (name, age, city)
if __name__ == '__main__':
    # Development server only; debug=True enables the reloader and debugger.
    app.run(debug=True)
| [
"460556933@qq.com"
] | 460556933@qq.com |
80485ce718500ca358be8d48737f12d0e00ff2f8 | a4830a0189c325c35c9021479a5958ec870a2e8b | /routing/api/serializers.py | fa0faff2e2395300d719b22e0799136c9f3972b2 | [] | no_license | solutionprovider9174/steward | 044c7d299a625108824c854839ac41f51d2ca3fd | fd681593a9d2d339aab0f6f3688412d71cd2ae32 | refs/heads/master | 2022-12-11T06:45:04.544838 | 2020-08-21T02:56:55 | 2020-08-21T02:56:55 | 289,162,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,379 | py | # third party
from rest_framework import serializers
from rest_framework.reverse import reverse, reverse_lazy
from rest_framework.serializers import raise_errors_on_nested_writes
from rest_framework.utils import model_meta
# local
from routing import models
class RecordSerializer(serializers.ModelSerializer):
    """Routing record with a hyperlink back to its API detail view."""
    url = serializers.HyperlinkedIdentityField(view_name='api:routing-record-detail')

    class Meta:
        model = models.Record
        fields = ('url', 'id', 'route', 'order', 'preference', 'flags', 'service', 'regex', 'replacement')
class RouteSerializer(serializers.ModelSerializer):
    """Route plus its nested records."""
    url = serializers.HyperlinkedIdentityField(view_name='api:routing-route-detail')
    # Records are exposed read-only here; they are managed through the
    # record endpoint, not through this serializer.
    records = RecordSerializer(many=True, read_only=True)

    class Meta:
        model = models.Route
        fields = ('url', 'id', 'name', 'trunkgroup', 'records', 'type')
class NumberSerializer(serializers.ModelSerializer):
    """Serializer for routed numbers.

    ``create`` is overridden to *upsert*: a POST for an existing
    (cc, number) pair updates that row (and re-activates it) instead of
    failing on the unique constraint.
    """

    class Meta:
        model = models.Number
        fields = ('cc', 'number', 'route', 'modified')

    def create(self, validated_data):
        """Create or update the Number identified by (cc, number).

        Mirrors DRF's ModelSerializer.create(), but routes through
        ``update_or_create`` and forces ``active=True``.
        """
        raise_errors_on_nested_writes('create', self, validated_data)

        ModelClass = self.Meta.model

        # Split off many-to-many relations; they can only be set once the
        # instance exists.
        info = model_meta.get_field_info(ModelClass)
        many_to_many = {}
        for field_name, relation_info in info.relations.items():
            if relation_info.to_many and (field_name in validated_data):
                many_to_many[field_name] = validated_data.pop(field_name)

        try:
            cc = validated_data.pop('cc')
            number = validated_data.pop('number')
            # Re-posting an existing number re-activates it.
            validated_data['active'] = True
            instance, _ = ModelClass.objects.update_or_create(
                cc=cc, number=number, defaults=validated_data)
        except TypeError as exc:
            # Same diagnostic DRF raises, corrected to name the call we
            # actually make (update_or_create, not create).
            msg = (
                'Got a `TypeError` when calling '
                '`%s.objects.update_or_create()`. This may be because you '
                'have a writable field on the serializer class that is not '
                'a valid argument to `%s.objects.update_or_create()`. You '
                'may need to make the field read-only, or override the '
                '%s.create() method to handle this correctly.\n'
                'Original exception text was: %s.' %
                (
                    ModelClass.__name__,
                    ModelClass.__name__,
                    self.__class__.__name__,
                    exc
                )
            )
            raise TypeError(msg)

        # Save many-to-many relationships after the instance is created.
        # NOTE(review): plain setattr on a to-many field raises on modern
        # Django; confirm whether instance.<field>.set(value) is intended.
        if many_to_many:
            for field_name, value in many_to_many.items():
                setattr(instance, field_name, value)

        return instance
class FraudBypassSerializer(serializers.ModelSerializer):
    """Serializer for fraud-bypass numbers.

    ``create`` upserts on (cc, number): re-posting an existing pair simply
    refreshes the row instead of violating the unique constraint.
    """

    class Meta:
        model = models.FraudBypass
        fields = ('cc', 'number', 'modified')

    def create(self, validated_data):
        """Create or fetch the FraudBypass identified by (cc, number)."""
        raise_errors_on_nested_writes('create', self, validated_data)

        ModelClass = self.Meta.model

        # Split off many-to-many relations; they can only be set once the
        # instance exists.
        info = model_meta.get_field_info(ModelClass)
        many_to_many = {}
        for field_name, relation_info in info.relations.items():
            if relation_info.to_many and (field_name in validated_data):
                many_to_many[field_name] = validated_data.pop(field_name)

        try:
            cc = validated_data.pop('cc')
            number = validated_data.pop('number')
            instance, _ = ModelClass.objects.update_or_create(cc=cc, number=number)
        except TypeError as exc:
            # Same diagnostic DRF raises, corrected to name the call we
            # actually make (update_or_create, not create).
            msg = (
                'Got a `TypeError` when calling '
                '`%s.objects.update_or_create()`. This may be because you '
                'have a writable field on the serializer class that is not '
                'a valid argument to `%s.objects.update_or_create()`. You '
                'may need to make the field read-only, or override the '
                '%s.create() method to handle this correctly.\n'
                'Original exception text was: %s.' %
                (
                    ModelClass.__name__,
                    ModelClass.__name__,
                    self.__class__.__name__,
                    exc
                )
            )
            raise TypeError(msg)

        # Save many-to-many relationships after the instance is created.
        # NOTE(review): plain setattr on a to-many field raises on modern
        # Django; confirm whether instance.<field>.set(value) is intended.
        if many_to_many:
            for field_name, value in many_to_many.items():
                setattr(instance, field_name, value)

        return instance
class OutboundRouteSerializer(serializers.ModelSerializer):
    """Serializer for outbound routes.

    ``create`` upserts on the number: a POST for an existing number updates
    its routes/comment instead of failing on the unique constraint.
    """

    class Meta:
        model = models.OutboundRoute
        fields = ('number', 'end_office_route', 'long_distance_route', 'comment', 'modified')

    def create(self, validated_data):
        """Create or update the OutboundRoute identified by its number."""
        raise_errors_on_nested_writes('create', self, validated_data)

        ModelClass = self.Meta.model

        # Split off many-to-many relations; they can only be set once the
        # instance exists.
        info = model_meta.get_field_info(ModelClass)
        many_to_many = {}
        for field_name, relation_info in info.relations.items():
            if relation_info.to_many and (field_name in validated_data):
                many_to_many[field_name] = validated_data.pop(field_name)

        try:
            number = validated_data.pop('number')
            instance, _ = ModelClass.objects.update_or_create(
                number=number, defaults=validated_data)
        except TypeError as exc:
            # Same diagnostic DRF raises, corrected to name the call we
            # actually make (update_or_create, not create).
            msg = (
                'Got a `TypeError` when calling '
                '`%s.objects.update_or_create()`. This may be because you '
                'have a writable field on the serializer class that is not '
                'a valid argument to `%s.objects.update_or_create()`. You '
                'may need to make the field read-only, or override the '
                '%s.create() method to handle this correctly.\n'
                'Original exception text was: %s.' %
                (
                    ModelClass.__name__,
                    ModelClass.__name__,
                    self.__class__.__name__,
                    exc
                )
            )
            raise TypeError(msg)

        # Save many-to-many relationships after the instance is created.
        # NOTE(review): plain setattr on a to-many field raises on modern
        # Django; confirm whether instance.<field>.set(value) is intended.
        if many_to_many:
            for field_name, value in many_to_many.items():
                setattr(instance, field_name, value)

        return instance
class RemoteCallForwardSerializer(serializers.ModelSerializer):
    """Remote call-forward mapping (called number -> forward number)."""
    class Meta:
        model = models.RemoteCallForward
        fields = ('called_number', 'forward_number', 'modified')
| [
"guangchengwang9174@yandex.com"
] | guangchengwang9174@yandex.com |
452d304cd0cb7e229c461780f1c01c85f3b2c965 | 349f39b27a7c3157a1f3db65f35b96bcdb2f5919 | /03/xx/07-sklearn/03-recommender/b-recommendation/pearson_score.py | 02025c50814123b47e96f25d161de7074e4efb5a | [] | no_license | microgenios/cod | 5f870c9cefbb80d18690909baa4c9d8b9be463c2 | 0805609cc780244c640963dc4c70052e3df57b4e | refs/heads/master | 2022-12-08T20:10:11.742940 | 2020-02-29T10:37:10 | 2020-02-29T10:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | #!/usr/bin/python
import json
import numpy as np
def pearson_score(dataset, user1, user2):
    """Return the Pearson correlation between two users' common ratings.

    dataset maps user name -> {item: rating}.  Returns 0 when the users
    share no rated items or when either side has zero variance; raises
    TypeError when a user is unknown.
    """
    for user in (user1, user2):
        if user not in dataset:
            raise TypeError("User " + user + " not present in the dataset")
    # Items rated by both users, in user1's iteration order.
    common = [item for item in dataset[user1] if item in dataset[user2]]
    n = len(common)
    if n == 0:
        return 0
    r1 = [dataset[user1][item] for item in common]
    r2 = [dataset[user2][item] for item in common]
    sum1 = np.sum(r1)
    sum2 = np.sum(r2)
    # Standard computational form of the Pearson correlation.
    Sxy = np.sum([a * b for a, b in zip(r1, r2)]) - sum1 * sum2 / n
    Sxx = np.sum([np.square(a) for a in r1]) - np.square(sum1) / n
    Syy = np.sum([np.square(b) for b in r2]) - np.square(sum2) / n
    if Sxx * Syy == 0:
        return 0
    return Sxy / np.sqrt(Sxx * Syy)
if __name__ == "__main__":
    # Demo: compute the similarity of two users from the bundled ratings file.
    data_file = "movie_ratings.json"
    with open(data_file, "r") as f:
        data = json.load(f)
    user1 = "John Carson"
    user2 = "Michelle Peterson"
    # print() with a single argument is valid under both Python 2 and 3;
    # the original Python-2-only print statements broke under python3.
    print("\nPearson score:")
    print(pearson_score(data, user1, user2))
| [
"githubfortyuds@gmail.com"
] | githubfortyuds@gmail.com |
702d1ef1af4bc8885ca91c57de10eca08084906c | cccc9fa74b16cc4a2ae37dfb2449d6dc1ce215cd | /ontological_comparison/cluster/ranges-vs-binary/5.classification_framework.py | ab11325643f0b3c16a0b503df5f6907008eac16f | [] | no_license | nagyistge/brainmeta | 611daf90d77432fa72a79b30fa4b895a60647536 | 105cffebcc0bf1c246ed11b67f3da2fff4a05f99 | refs/heads/master | 2021-05-30T06:44:40.095517 | 2016-01-11T23:21:50 | 2016-01-11T23:21:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,169 | py | #!/usr/bin/python
from glob import glob
import numpy
import pickle
import pandas
import os
# Paths: everything lives under the project's data directory.
base = "/share/PI/russpold/work/IMAGE_COMPARISON/ONTOLOGICAL_COMPARISON"
data = "%s/data" %base # mostly images
scores_folder = "%s/individual_scores" %(data) # output folder for individual scores
likelihood_pickles = glob("%s/likelihood/*.pkl" %(data))
scores = glob("%s/*.pkl" %scores_folder)
# Read in results files
# (tables of reverse-inference scores and Bayes factors: rows are images,
# columns are ontology nodes)
ri_ranges = pandas.read_csv("%s/reverse_inference_scores_ranges.tsv" %data,sep="\t")
ri_binary = pandas.read_csv("%s/reverse_inference_scores_binary.tsv" %data,sep="\t")
ri_priors_in = pandas.read_csv("%s/reverse_inference_priors_in.tsv" %data,sep="\t")
ri_priors_out = pandas.read_csv("%s/reverse_inference_priors_out.tsv" %data,sep="\t")
# NOTE(review): this filename lacks the ".tsv" extension its siblings have --
# confirm the file on disk really is named without the extension.
bayes_in_ranges = pandas.read_csv("%s/reverse_inference_bayes_in_ranges" %data,sep="\t")
bayes_out_ranges = pandas.read_csv("%s/reverse_inference_bayes_out_ranges.tsv" %data,sep="\t")
bayes_in_bin = pandas.read_csv("%s/reverse_inference_bayes_in_binary.tsv" %data,sep="\t")
bayes_out_bin = pandas.read_csv("%s/reverse_inference_bayes_out_binary.tsv" %data,sep="\t")
# Per-node tallies of images whose Bayes factors agree ("for") or disagree
# ("against") with their known group membership.
count_bin = pandas.DataFrame(0,columns=["for","against"],index=bayes_in_bin.columns)
count_range = pandas.DataFrame(0,columns=["for","against"],index=bayes_in_bin.columns)
# For each likelihood pickle, read in the "in" and "out" groups, and take a
# count for when evidence for > evidence against.
for i in range(0, len(likelihood_pickles)):
    # print() is valid under both Python 2 and 3; the original used the
    # Python-2-only print statement.
    print("Parsing %s of %s" % (i, len(likelihood_pickles)))
    node = likelihood_pickles[i]
    # Close the pickle file deterministically instead of leaking the handle.
    with open(node, "rb") as fp:
        group = pickle.load(fp)
    nid = group["nid"]
    # "in" images: evidence counts "for" when the in-group Bayes factor wins.
    for image in group["in"]:
        image_id = os.path.split(image)[1].replace(".nii.gz", "")
        # Skip images that have not been scored yet (NaN in the table).
        if not numpy.isnan(ri_ranges.loc[image_id, nid]):
            if bayes_in_ranges.loc[image_id, nid] > bayes_out_ranges.loc[image_id, nid]:
                count_range.loc[nid, "for"] = count_range.loc[nid, "for"] + 1
            else:
                count_range.loc[nid, "against"] = count_range.loc[nid, "against"] + 1
            if bayes_in_bin.loc[image_id, nid] > bayes_out_bin.loc[image_id, nid]:
                count_bin.loc[nid, "for"] = count_bin.loc[nid, "for"] + 1
            else:
                count_bin.loc[nid, "against"] = count_bin.loc[nid, "against"] + 1
    # "out" images: evidence counts "for" when the out-group Bayes factor wins.
    for image in group["out"]:
        image_id = os.path.split(image)[1].replace(".nii.gz", "")
        # Skip images that have not been scored yet (NaN in the table).
        if not numpy.isnan(ri_ranges.loc[image_id, nid]):
            if bayes_in_ranges.loc[image_id, nid] < bayes_out_ranges.loc[image_id, nid]:
                count_range.loc[nid, "for"] = count_range.loc[nid, "for"] + 1
            else:
                count_range.loc[nid, "against"] = count_range.loc[nid, "against"] + 1
            if bayes_in_bin.loc[image_id, nid] < bayes_out_bin.loc[image_id, nid]:
                count_bin.loc[nid, "for"] = count_bin.loc[nid, "for"] + 1
            else:
                count_bin.loc[nid, "against"] = count_bin.loc[nid, "against"] + 1
| [
"vsochat@stanford.edu"
] | vsochat@stanford.edu |
2f0773b8c5d46f6bd230c06c7196562bbf16b9e1 | 9e204a5b1c5ff4ea3b115ff0559b5af803ab4d15 | /165 Compare Version Numbers.py | 1f61ba3654b784b4d7c93411d075ff1403b616d0 | [
"MIT"
] | permissive | Aminaba123/LeetCode | 178ed1be0733cc7390f30e676eb47cc7f900c5b2 | cbbd4a67ab342ada2421e13f82d660b1d47d4d20 | refs/heads/master | 2020-04-20T10:40:00.424279 | 2019-01-31T08:13:58 | 2019-01-31T08:13:58 | 168,795,374 | 1 | 0 | MIT | 2019-02-02T04:50:31 | 2019-02-02T04:50:30 | null | UTF-8 | Python | false | false | 1,337 | py | """
Compare two version numbers version1 and version2.
If version1 > version2 return 1, if version1 < version2 return -1, otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way to version three", it is the fifth second-level revision of the
second first-level revision.
Here is an example of version numbers ordering:
0.1 < 1.1 < 1.2 < 13.37
"""
__author__ = 'Daniel'
class Solution:
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype int
"""
version1 = map(int, version1.split("."))
version2 = map(int, version2.split("."))
n1 = len(version1)
n2 = len(version2)
for i in xrange(min(n1, n2)):
if version1[i] == version2[i]:
pass
else:
return -1 if version1[i] < version2[i] else 1
# 1.0.0 and 1
if n1 == n2 or n1 > n2 and all(map(lambda x: x == 0, version1[n2:])) or \
n1 < n2 and all(map(lambda x: x == 0, version2[n1:])):
return 0
return -1 if n1 < n2 else 1
| [
"zhangdanyangg@gmail.com"
] | zhangdanyangg@gmail.com |
fffc07050ea3893fe210204e4620ee236ef47575 | bc14b7d58665f813ec4ff160f63d972989bd9d33 | /Hackerrank/Find_Digits.py | b5cac2f65b6d958dd26a5cc6312f6844fb8edd9f | [] | no_license | Dhrumil-Zion/Competitive-Programming-Basics | 4f2a7dba7740c2b11c0739fa541d3f2fc651c01e | 31d52f35ade0a99e32f933b37712654e025cb584 | refs/heads/main | 2023-05-31T23:19:58.159508 | 2021-07-05T17:45:32 | 2021-07-05T17:45:32 | 343,542,404 | 2 | 0 | null | 2021-05-26T14:13:50 | 2021-03-01T20:06:52 | Python | UTF-8 | Python | false | false | 184 | py | def findDigits(n):
    # Count how many digits of n divide n evenly ("lucky digits").
    t=n
    c=0
    while n!=0:
        # Peel off the least-significant digit.
        temp = n%10
        n=int(n/10)
        # Zero digits are skipped: division by zero is undefined.
        if temp ==0:
            continue
        if t%temp==0:
            c+=1
    return c
"dhrumilg699@gmail.com"
] | dhrumilg699@gmail.com |
e0347c43c947ac2e36a49b166a3a183a273c1f17 | c29de7ce2d91f572aeb4da56801de7a1dc034054 | /lds/lds/mnist/cnn_model_004.py | 2f75bceb8d0da18df607934f36ec89e04a36dc75 | [] | no_license | kzky/works | 18b8d754bfc2b1da22022926d882dfe92ea785e6 | b8708c305e52f924ea5a7071e0dfe5f2feb7a0a3 | refs/heads/master | 2021-01-10T08:04:44.831232 | 2018-03-01T15:09:47 | 2018-03-01T15:09:47 | 54,316,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,149 | py | """Models
Resnet Encoder and Decoder. The residual block consists of three convolutions.
"""
import numpy as np
import chainer
import chainer.variable as variable
from chainer.functions.activation import lstm
from chainer import cuda, Function, gradient_check, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from collections import OrderedDict
import logging
import time
from lds.utils import to_device
from lds.chainer_fix import BatchNormalization
class ResEnc(Chain):
    """Residual encoder block: three conv->BN->act stages plus a skip path.

    With ``dn=True`` the block downsamples by 2 (stride-2 4x4 convs) and the
    skip path gets its own strided conv+BN; otherwise the spatial size is
    kept and the skip path is the identity.

    NOTE(review): in the ``dn=False`` branch the batch-norm layers are sized
    with ``inmap`` while conv0 already outputs ``outmap`` channels, so the
    block only works when ``inmap == outmap`` (true for every call site in
    this file) -- confirm before reusing with differing channel counts.
    """
    def __init__(self, inmap=32, outmap=64, act=F.relu, dn=False):
        if dn:
            # Downsampling variant: stride-2 first conv, learned skip conv3.
            super(ResEnc, self).__init__(
                conv0=L.Convolution2D(inmap, outmap, 4, stride=2, pad=1),
                conv1=L.Convolution2D(outmap, outmap, 3, stride=1, pad=1),
                conv2=L.Convolution2D(outmap, outmap, 3, stride=1, pad=1),
                conv3=L.Convolution2D(inmap, outmap, 4, stride=2, pad=1),
                bn0=L.BatchNormalization(outmap, decay=0.9, use_cudnn=False),
                bn1=L.BatchNormalization(outmap, decay=0.9, use_cudnn=False),
                bn2=L.BatchNormalization(outmap, decay=0.9, use_cudnn=False),
                bn3=L.BatchNormalization(outmap, decay=0.9, use_cudnn=False),
            )
        else:
            # Resolution-preserving variant: 1x1 -> 3x3 -> 1x1 bottleneck.
            super(ResEnc, self).__init__(
                conv0=L.Convolution2D(inmap, outmap, 1, stride=1, pad=0),
                conv1=L.Convolution2D(inmap, outmap, 3, stride=1, pad=1),
                conv2=L.Convolution2D(inmap, outmap, 1, stride=1, pad=0),
                bn0=L.BatchNormalization(inmap, decay=0.9, use_cudnn=False),
                bn1=L.BatchNormalization(inmap, decay=0.9, use_cudnn=False),
                bn2=L.BatchNormalization(inmap, decay=0.9, use_cudnn=False),
            )
        self.act = act
        self.dn = dn
    def __call__(self, x, test=False):
        # Main path: three conv -> BN -> activation stages.
        h = self.conv0(x)
        h = self.bn0(h, test)
        h = self.act(h)
        h = self.conv1(h)
        h = self.bn1(h, test)
        h = self.act(h)
        h = self.conv2(h)
        h = self.bn2(h, test)
        h = self.act(h)
        # Skip path: identity, or a strided conv when downsampling.
        h_s = x
        if self.dn:
            h_s = self.conv3(x)
            h_s = self.bn3(h_s, test)
            h_s = self.act(h_s)
        return h + h_s
class ResDec(Chain):
    """Residual decoder block: three deconv->BN(->act) stages plus a skip path.

    With ``up=True`` the block upsamples by 2 (stride-2 4x4 deconvs) and the
    skip path gets its own strided deconv+BN; otherwise the spatial size is
    kept and the skip path is the identity.  When ``outmap == 1`` (the final
    image layer) the last activation is omitted on both paths.

    NOTE(review): the link names are spelled ``decovn*`` (sic); renaming them
    would invalidate any serialized model, so the typo is kept.  In the
    ``up=False`` branch ``bn3`` is registered but never used, and the BN
    sizes assume ``inmap == outmap`` (true for every call site in this file).
    """
    def __init__(self, inmap=32, outmap=64, act=F.relu, up=False):
        if up:
            # Upsampling variant: stride-2 first deconv, learned skip decovn3.
            super(ResDec, self).__init__(
                decovn0=L.Deconvolution2D(inmap, outmap, 4, stride=2, pad=1),
                decovn1=L.Deconvolution2D(outmap, outmap, 3, stride=1, pad=1),
                decovn2=L.Deconvolution2D(outmap, outmap, 3, stride=1, pad=1),
                decovn3=L.Deconvolution2D(inmap, outmap, 4, stride=2, pad=1),
                bn0=L.BatchNormalization(outmap, decay=0.9, use_cudnn=False),
                bn1=L.BatchNormalization(outmap, decay=0.9, use_cudnn=False),
                bn2=L.BatchNormalization(outmap, decay=0.9, use_cudnn=False),
                bn3=L.BatchNormalization(outmap, decay=0.9, use_cudnn=False),
            )
        else:
            # Resolution-preserving variant: 1x1 -> 3x3 -> 1x1 bottleneck.
            super(ResDec, self).__init__(
                decovn0=L.Deconvolution2D(inmap, outmap, 1, stride=1, pad=0),
                decovn1=L.Deconvolution2D(inmap, outmap, 3, stride=1, pad=1),
                decovn2=L.Deconvolution2D(inmap, outmap, 1, stride=1, pad=0),
                bn0=L.BatchNormalization(inmap, decay=0.9, use_cudnn=False),
                bn1=L.BatchNormalization(inmap, decay=0.9, use_cudnn=False),
                bn2=L.BatchNormalization(inmap, decay=0.9, use_cudnn=False),
                bn3=L.BatchNormalization(inmap, decay=0.9, use_cudnn=False)
            )
        self.act = act
        self.up = up
        self.outmap = outmap
    def __call__(self, x, test=False):
        # Main path: three deconv -> BN -> activation stages.
        h = self.decovn0(x)
        h = self.bn0(h, test)
        h = self.act(h)
        h = self.decovn1(h)
        h = self.bn1(h, test)
        h = self.act(h)
        h = self.decovn2(h)
        h = self.bn2(h, test)
        # No activation on the final single-channel (image) output.
        if self.outmap != 1:
            h = self.act(h)
        # Skip path: identity, or a strided deconv when upsampling.
        h_s = x
        if self.up:
            h_s = self.decovn3(x)
            h_s = self.bn3(h_s, test)
            if self.outmap != 1:
                h_s = self.act(h_s)
        return h + h_s
class Encoder(Chain):
    """1x28x28 -> 10 encoder built from residual blocks.

    Calling it repopulates ``self.hiddens`` (intermediate feature maps) and
    ``self.classifiers`` (auxiliary 10-way logits, one branch classifier per
    stage) as side effects.
    """

    def __init__(self, act=F.relu):
        super(Encoder, self).__init__(
            # Encoder
            resenc0=ResEnc(1, 32, act, dn=True),
            resenc1=ResEnc(32, 32, act),
            resenc2=ResEnc(32, 64, act, dn=True),
            resenc3=ResEnc(64, 64, act),
            linear0=L.Linear(64 * 7 * 7, 32),
            linear1=L.Linear(32, 10),
            bn0=L.BatchNormalization(32, decay=0.9, use_cudnn=False),
            # BranchNet: auxiliary classifiers attached to each stage
            linear0_bn=L.Linear(32*14*14, 10),
            linear1_bn=L.Linear(32*14*14, 10),
            linear2_bn=L.Linear(64*7*7, 10),
            linear3_bn=L.Linear(64*7*7, 10),
            linear4_bn=L.Linear(32, 10),
        )
        self.act = act
        self.hiddens = []
        self.classifiers = []

    def __call__(self, x, test=False):
        self.hiddens = []
        self.classifiers = []
        # BUGFIX: `test` is now forwarded to the residual blocks so their
        # batch-normalization layers use the moving statistics at test time
        # (previously the blocks always ran in training mode).
        h = self.resenc0(x, test)  # 14x14
        self.hiddens.append(h)
        y = self.linear0_bn(h)
        self.classifiers.append(y)
        h = self.resenc1(h, test)  # 14x14
        self.hiddens.append(h)
        y = self.linear1_bn(h)
        self.classifiers.append(y)
        h = self.resenc2(h, test)  # 7x7
        self.hiddens.append(h)
        y = self.linear2_bn(h)
        self.classifiers.append(y)
        h = self.resenc3(h, test)  # 7x7
        self.hiddens.append(h)
        y = self.linear3_bn(h)
        self.classifiers.append(y)
        h = self.linear0(h)
        h = self.bn0(h, test)
        h = self.act(h)
        self.hiddens.append(h)
        y = self.linear4_bn(h)
        self.classifiers.append(y)
        h = self.linear1(h)
        return h
class Decoder(Chain):
    """10 -> 1x28x28 decoder mirroring Encoder.

    Calling it repopulates ``self.hiddens`` (intermediate feature maps) and
    ``self.classifiers`` (auxiliary 10-way logits, one branch classifier per
    stage) as side effects.
    """

    def __init__(self, act=F.relu):
        super(Decoder, self).__init__(
            # Decoder
            linear0=L.Linear(10, 32),
            linear1=L.Linear(32, 64 * 7 * 7),
            bn0=L.BatchNormalization(32, decay=0.9, use_cudnn=False),
            bn1=L.BatchNormalization(64 * 7 * 7, decay=0.9, use_cudnn=False),
            resdec0=ResDec(64, 64, act),
            resdec1=ResDec(64, 32, act, up=True),
            resdec2=ResDec(32, 32, act),
            resdec3=ResDec(32, 1, act, up=True),
            # BranchNet: auxiliary classifiers attached to each stage
            linear0_bn=L.Linear(32, 10),
            linear1_bn=L.Linear(64*7*7, 10),
            linear2_bn=L.Linear(64*7*7, 10),
            linear3_bn=L.Linear(32*14*14, 10),
            linear4_bn=L.Linear(32*14*14, 10),
        )
        self.act = act
        self.hiddens = []
        self.classifiers = []

    def __call__(self, y, test=False):
        self.hiddens = []
        self.classifiers = []
        bs = y.shape[0]
        h = self.linear0(y)
        h = self.bn0(h, test)
        h = self.act(h)
        self.hiddens.append(h)
        y = self.linear0_bn(h)
        self.classifiers.append(y)
        h = self.linear1(h)
        h = self.bn1(h, test)
        h = self.act(h)
        h = F.reshape(h, (bs, 64, 7, 7))
        self.hiddens.append(h)
        y = self.linear1_bn(h)
        self.classifiers.append(y)
        # BUGFIX: `test` is now forwarded to the residual blocks so their
        # batch-normalization layers use the moving statistics at test time
        # (previously the blocks always ran in training mode).
        h = self.resdec0(h, test)  # 7x7
        self.hiddens.append(h)
        y = self.linear2_bn(h)
        self.classifiers.append(y)
        h = self.resdec1(h, test)  # 14x14
        self.hiddens.append(h)
        y = self.linear3_bn(h)
        self.classifiers.append(y)
        h = self.resdec2(h, test)  # 14x14
        self.hiddens.append(h)
        y = self.linear4_bn(h)
        self.classifiers.append(y)
        h = self.resdec3(h, test)  # 28x28
        return h
class AutoEncoder(Chain):
    """Encoder + Decoder pair registered as child links of one chain."""
    def __init__(self, act=F.relu):
        super(AutoEncoder, self).__init__(
            encoder=Encoder(act=act),
            decoder=Decoder(act=act)
        )
| [
"rkzfilter@gmail.com"
] | rkzfilter@gmail.com |
e0740789c350a5f1de0471f47487fe3b782556a5 | 3474b315da3cc5cb3f7823f19a18b63a8da6a526 | /scratch/KRAMS/src/ibvpy/core/rtrace_eval.py | 5e23d0423eb8c743c760e9158c686a89c459eee9 | [] | no_license | h4ck3rm1k3/scratch | 8df97462f696bc2be00f1e58232e1cd915f0fafd | 0a114a41b0d1e9b2d68dbe7af7cf34db11512539 | refs/heads/master | 2021-01-21T15:31:38.718039 | 2013-09-19T10:48:24 | 2013-09-19T10:48:24 | 29,173,525 | 0 | 0 | null | 2015-01-13T04:58:57 | 2015-01-13T04:58:56 | null | UTF-8 | Python | false | false | 1,796 | py |
from enthought.traits.api import Array, Bool, Callable, Enum, Float, HasTraits, \
Instance, Int, Trait, Str, Enum, Callable, List, TraitDict, Any, \
Dict, Property, cached_property, WeakRef, Delegate
from enthought.traits.ui.api import Item, View, HGroup, ListEditor, VGroup, VSplit, Group, HSplit
from enthought.traits.ui.menu import NoButtons, OKButton, CancelButton, Action, CloseAction, Menu, \
MenuBar, Separator
import wx
#from enthought.pyface.tvtk.actor_editor import ActorEditor
from i_tstepper_eval import ITStepperEval
class RTraceEval(HasTraits):
    """Named evaluation callable used by response tracers.

    An optional ``u_mapping`` is applied to the state vector ``u`` (and to
    any companion vectors, e.g. time derivatives or spatial integrals)
    before ``eval`` is invoked.  The mapping may have side effects on the
    spatial context (global-to-local value mapping).

    The original used the Python-2-only ``except TypeError, e`` /
    ``raise TypeError, msg`` statement forms; the expression forms below
    behave identically and are valid on Python 2.6+ and Python 3.
    """
    name = Str('unnamed')
    ts = WeakRef(ITStepperEval)

    u_mapping = Callable
    eval = Callable

    def __call__(self, sctx, u, *args, **kw):
        """Map u (and companions) if a mapping is set, then evaluate."""
        args_mapped = []
        kw_mapped = {}
        if self.u_mapping:
            u = self.u_mapping(sctx, u)
            # Map everything that was sent together with u as well.
            args_mapped = [self.u_mapping(sctx, u_value)
                           for u_value in args]
            for u_name, u_value in kw.items():
                kw_mapped[u_name] = self.u_mapping(sctx, u_value)
        # Invoke the tracer evaluation; annotate TypeErrors with the tracer
        # name so misconfigured tracers are easy to identify.
        try:
            val = self.eval(sctx, u, *args_mapped, **kw_mapped)
        except TypeError as e:
            raise TypeError('tracer name %s: %s %s' % (self.name, e, self.eval))
        return val
| [
"Axel@Axel-Pc"
] | Axel@Axel-Pc |
0130f25ca5518d95374b2a3401a3593824f8ed97 | a2c90d183ac66f39401cd8ece5207c492c811158 | /Solving_Problem/과제 스크립/동철이의 일 분배.py | 8bf7ad523e7629e6c0c26ac7ac9ecaabc0ec2576 | [] | no_license | kwoneyng/TIL | 0498cfc4dbebbb1f2c193cb7c9459aab7ebad02a | c6fbaa609b2e805f298b17b1f9504fd12cb63e8a | refs/heads/master | 2020-06-17T11:53:38.685202 | 2020-03-18T01:29:36 | 2020-03-18T01:29:36 | 195,916,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | def find(flr=0, rs=1):
    # Depth-first search: worker `flr` tries every unassigned job; `rs` is
    # the running product of success probabilities along this assignment.
    global mx, n
    if flr == n :
        # All workers assigned: keep the best product seen so far.
        if rs > mx:
            mx = rs
        return 0
    if rs <= mx:
        # Prune: probabilities are <= 1, so the product can only shrink
        # and this branch can no longer beat the current maximum.
        return 0
    for i in range(n):
        if not vis[i]:
            vis[i] = 1
            find(flr+1, rs*bd[flr][i])
            vis[i] = 0
for T in range(int(input())):
    # One test case: n workers/jobs and an n x n percentage matrix.
    n = int(input())
    bd = [list(map(int, input().split())) for i in range(n)]
    # Convert percentages to probabilities in [0, 1].
    for x in range(n):
        for y in range(n):
            bd[x][y] /= 100
    # Globals consumed by find(): best product so far and job-visited flags.
    mx = 0
    vis = [0]*(n+1)
    find()
print('#{}'.format(T+1),'%0.6f' % (mx*100)) | [
"nan308@naver.com"
] | nan308@naver.com |
187eca160dde7235ce4bbe8976e8f0277e668721 | 03615d7cde58bbddfc730995adbe0c77227dcef2 | /ch9-blog-viewsets-routers/django_project/urls.py | fdad2dba7a7570deac95e7a910abbba45515bec6 | [
"MIT"
] | permissive | wsvincent/restapiswithdjango | 90afd007ad8878ebdaeebbfd159ce2b815f6d3a9 | 4a12e5289beeb00308f2d8ceefa343666fa11c05 | refs/heads/master | 2023-05-24T19:34:37.406604 | 2022-03-22T17:52:41 | 2022-03-22T17:52:41 | 129,749,663 | 416 | 153 | MIT | 2023-08-31T21:50:04 | 2018-04-16T13:41:43 | JavaScript | UTF-8 | Python | false | false | 411 | py | from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    path("admin/", admin.site.urls),
    # Application API (posts app).
    path("api/v1/", include("posts.urls")),
    # Session login/logout for the browsable API.
    path("api-auth/", include("rest_framework.urls")),
    # Token-based login/logout endpoints from dj-rest-auth.
    path("api/v1/dj-rest-auth/", include("dj_rest_auth.urls")),
    path(
        "api/v1/dj-rest-auth/registration/",  # user sign-up endpoints
        include("dj_rest_auth.registration.urls"),
    ),
]
| [
"will@wsvincent.com"
] | will@wsvincent.com |
1baf81ac8088ea1dced86c42e60fa16eb6218d0b | 6b9865a42a86c538cff987b3079da5b644137b37 | /0x0D-python-almost_a_circle/models/square.py | ae2143a7039e2fc75c4b9f7cf23bdbcee70e5b7b | [] | no_license | zahraaassaad/holbertonschool-python | 099072b0350f594adf30b47eb18fcdce0375546d | 83d7d185f14f44ea4481538ab1e04463a9b62739 | refs/heads/master | 2023-01-09T03:34:48.082352 | 2020-11-20T17:54:05 | 2020-11-20T17:54:05 | 291,666,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | #!/usr/bin/python3
"""
This is a module for Square class.
"""
from models.rectangle import Rectangle
class Square(Rectangle):
    """A square: a Rectangle whose width and height are always equal."""
    def __init__(self, size, x=0, y=0, id=None):
        """Initialize a Square.

        Args:
            size: side length (stored as both width and height).
            x: horizontal offset.
            y: vertical offset.
            id: identity of the instance (auto-assigned when None).
        """
        super().__init__(size, size, x, y, id)
    def __str__(self):
        """Return '[Square] (<id>) <x>/<y> - <size>'."""
        return "[Square] ({}) {}/{} - {}".format(
            self.id, self.x, self.y, self.width)
    @property
    def size(self):
        """Side length of the square (aliases the inherited width)."""
        return self.width
    @size.setter
    def size(self, size):
        """Set the side length, keeping width and height in sync."""
        self.width = size
        self.height = size
    def update(self, *args, **kwargs):
        """Assign attributes positionally (id, size, x, y) or by keyword.

        Positional args take precedence; more than four positional args
        raise IndexError (attrs has only four entries).  Unknown keyword
        names are silently ignored.
        """
        if args:
            attrs = ["id", "size", "x", "y"]
            for i, e in enumerate(args):
                setattr(self, attrs[i], e)
            return
        for k, v in kwargs.items():
            if hasattr(self, k):
                setattr(self, k, v)
    def to_dictionary(self):
        """Return {'id', 'size', 'x', 'y'} built from the instance's attrs.

        Relies on Rectangle storing its fields as name-mangled private
        attributes (e.g. _Rectangle__width); the mangled prefix is stripped
        and both width and height collapse onto the single 'size' key.
        NOTE(review): Rectangle's internals are not visible here -- confirm
        it really uses double-underscore attributes plus a public `id`.
        """
        d = {}
        for k, v in vars(self).items():
            if k.startswith("_"):
                if not k.endswith("width") and not k.endswith("height"):
                    # Strip the '_ClassName__' mangling prefix.
                    idx = k.index("__")
                    d[k[idx + 2:]] = v
                else:
                    # width and height are the same value: expose as 'size'.
                    d["size"] = v
            else:
                d[k] = v
        return d
| [
"zahraa.asaad8@gmail.com"
] | zahraa.asaad8@gmail.com |
50dc2692084c7d3f075076afbc6f0c949da83d9b | 32ccef68538ddb8e7aeb4c8958b5810ed9230fd7 | /SQL.py | 40c35a0112cadf2dc5fa4c01727a41c05500992a | [] | no_license | Vutsuak16/cricket_crawler | 977cd778806ca8f2937da3aaecb072223e20c569 | 025f852768c1a1f720f0fcfdc259f54b3169b162 | refs/heads/master | 2021-01-10T15:22:58.454174 | 2015-12-30T14:15:52 | 2015-12-30T14:15:52 | 46,099,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,993 | py | __author__ = 'kasutuv'
import MySQLdb
def insert_db(data):
    """Create the cric_players table (if needed) and insert one player row.

    *data* is the scraped token list; modify() splits it into the column
    values (data1), the concatenated team string (s), the country and the
    profile text (data2).  Python 2 / MySQLdb code (print statements and
    old-style except clauses).
    """
    try:
        conn = db = MySQLdb.connect("localhost","root","","cricket_crawler" )
        cur = conn.cursor()
        data1,s,country,data2=modify(data)
        print data
        print s
        cur.execute(
            "CREATE TABLE IF NOT EXISTS cric_players(pname varchar(20) DEFAULT 'N.A.',pfullname varchar(40) NOT NULL,pcountry VARCHAR(20)DEFAULT 'N.A.',pborn varchar(60) DEFAULT 'N.A.',pcage varchar(20) DEFAULT 'N.A.',pmteam varchar(150) DEFAULT 'N.A.',pnick varchar(15) DEFAULT 'N.A.',prole varchar(25) DEFAULT 'N.A.',pbatt varchar(25) DEFAULT 'N.A.',pbowl varchar(25) DEFAULT 'N.A.' ,pfield varchar(25) DEFAULT 'N.A.',pprofile VARCHAR(1000) DEFAULT 'N.A.',PRIMARY KEY(pfullname));")
        # The scraper yields a variable number of fields; choose the INSERT
        # column list matching how many values survived modify().
        # NOTE(review): `len(data1) is 10` compares ints by identity -- it only
        # works because CPython caches small ints; should be `==`.
        if len(data1) is 10:
            cur.execute(
                "INSERT INTO cric_players(pname, pfullname , pborn, pcage, pmteam ,pnick ,prole ,pbatt ,pbowl, pfield) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", data1)
        elif len(data1) is 9:
            cur.execute(
                "INSERT INTO cric_players(pname, pfullname, pborn, pcage, pmteam, prole, pbatt, pbowl, pfield) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s)", data1)
        elif len(data1) is 8:
            cur.execute(
                "INSERT INTO cric_players(pname, pfullname, pborn, pcage, pmteam ,prole ,pbatt ,pbowl) VALUES(%s,%s,%s,%s,%s,%s,%s,%s)", data1)
        elif len(data1) is 7:
            cur.execute(
                "INSERT INTO cric_players(pname, pfullname, pborn, pcage, pmteam ,pbatt ,pbowl) VALUES(%s,%s,%s,%s,%s,%s,%s)", data1)
        # Back-fill the separately extracted fields; [:-1] drops the trailing
        # separator, [1:] drops the leading newline that modify() produces.
        # data1[1] is the player's full name -- the table's primary key.
        cur.execute(
            "UPDATE cric_players SET pmteam =%s WHERE pfullname =%s ", [s[:-1],data1[1]])
        cur.execute(
            "UPDATE cric_players SET pprofile =%s WHERE pfullname =%s ",[data2[1:],data1[1]])
        cur.execute(
            "UPDATE cric_players SET pcountry =%s WHERE pfullname =%s ", [country[:-1],data1[1]])
        conn.commit()
        cur.close()
        conn.close()
    except MySQLdb.Error, e:
        # Errors are only reported, not re-raised; the row is silently lost.
        print "Mysql Error 1 "
def showall_db(**args):
    """Query cric_players and return a printable report string.

    Keyword protocol: arg1 = column to filter on ("pname", "pcountry",
    "pbatt", "prole", "pbowl" or "pfield"), arg2 = LIKE pattern, arg3 =
    which fields to show ("All Info", "Player Name", "Country", "Born",
    "Current Age", "Major Teams" or "Profile"; default shows everything).
    With no arguments every row is returned.  Returns the report string,
    "Data not Found" when the query matches nothing, or 0 on a MySQL error.
    """
    try:
        conn = db = MySQLdb.connect("localhost","root","","cricket_crawler" )
        cur = conn.cursor()
        flag=1
        # NOTE(review): `flag` is never read afterwards -- dead variable.
        if not args:
            cur.execute("SELECT * FROM cric_players")
        # Parameterised LIKE queries -- one branch per supported filter column.
        if args.get("arg1")=="pname" and args.get("arg2"):
            cur.execute("SELECT * FROM cric_players WHERE pname LIKE %s ",[args.get("arg2")])
        if args.get("arg1")=="pcountry" and args.get("arg2"):
            cur.execute("SELECT * FROM cric_players WHERE pcountry LIKE %s ",[args.get("arg2")])
        if args.get("arg1")=="pbatt" and args.get("arg2"):
            cur.execute("SELECT * FROM cric_players WHERE pbatt LIKE %s ",[args.get("arg2")])
        if args.get("arg1")=="prole" and args.get("arg2"):
            cur.execute("SELECT * FROM cric_players WHERE prole LIKE %s ",[args.get("arg2")])
        if args.get("arg1")=="pbowl" and args.get("arg2"):
            cur.execute("SELECT * FROM cric_players WHERE pbowl LIKE %s ",[args.get("arg2")])
        if args.get("arg1")=="pfield" and args.get("arg2"):
            cur.execute("SELECT * FROM cric_players WHERE pfield LIKE %s ",[args.get("arg2")])
        results = cur.fetchall()
        # Probe the first row to detect an empty result set; the bare except
        # also swallows any other error here.
        try:
            results[0]
        except:
            return "Data not Found"
        disp=""
        for row in results:
            # Column order matches the CREATE TABLE statement in insert_db().
            pname=row[0]
            pfullname=row[1]
            pcountry=row[2]
            pborn=row[3]
            pcage=row[4]
            pmteam=row[5]
            pnick=row[6]
            prole=row[7]
            pbatt=row[8]
            pbowl=row[9]
            pfield=row[10]
            pprofile=row[11]
            # Each branch both prints the selection and appends it to the
            # report string that is ultimately returned.
            if not args.get("arg3") or args.get("arg3")=="All Info":
                print "Player Name = %s Fullname = %s, Country = %s, Born on = %s, CurrentAge = %s, MajorTeam = %s, Nick = %s, Role = %s, Batting = %s, Bowling = %s, Field = %s,\n Profile = %s" %(pname, pfullname, pcountry, pborn, pcage, pmteam, pnick, prole, pbatt, pbowl, pfield, pprofile)
                disp=disp+"Player Name = "+ pname +"\nFullname = "+pfullname+",\nCountry = "+pcountry+",\nBorn on = "+pborn+",\nCurrentAge = "+pcage+",\nMajorTeam = "+pmteam+",\nNick = "+pnick+",\nRole = "+prole+",\nBatting = "+pbatt+",\nBowling = "+pbowl+", Field = "+pfield+",\n Profile = "+ pprofile
            if args.get("arg3")=="Player Name":
                print "Player Name = %s"%pname
                disp=disp+"Player Name = "+ pname+"\n"
            if args.get("arg3")=="Country":
                print "Player Country = %s"%pcountry
                disp=disp+"Player Country = "+pcountry+"\n"
            if args.get("arg3")=="Born":
                print "Born = %s"%pborn
                disp=disp+"Born = "+pborn+"\n"
            if args.get("arg3")=="Current Age":
                print "Current Age = %s"%pcage
                disp=disp+"Current Age = "+pcage+"\n"
            if args.get("arg3")=="Major Teams":
                print "Played For = %s"%pmteam
                disp=disp+"Played For = "+pmteam+"\n"
            if args.get("arg3")=="Profile":
                print "Player Profile = %s"%pprofile
                disp=disp+"Player Profile = "+pprofile+"\n"
        conn.commit()
        cur.close()
        conn.close()
        return disp
    except MySQLdb.Error, e:
        print "Mysql Error 2"
        return 0
def modify(data):
    """Split off the trailing profile text and normalise the word list.

    Pops the last element of *data* and rewrites its ',' and '.'
    characters as newlines, then runs modify2() on the remaining words.
    Returns (remaining words, team string, country string, profile text).
    """
    profile = data.pop(-1)
    pieces = []
    for character in profile:
        if character in (',', '.'):
            pieces.append("\n")
        else:
            pieces.append(character)
    profile = "".join(pieces)
    words, team, country = modify2(data)
    return words, team, country, profile
def modify2(data):
    """Pull comma-terminated words out of *data*.

    Mutates *data* in place: every word ending with ',' is removed and
    concatenated into a "teams" string; the first such word is also kept
    as the country.  Empty-string entries are dropped as well.

    Returns a tuple (data, teams, country) -- the same (mutated) list plus
    the two accumulated strings.
    """
    s = ""
    country = ""
    flag = 0
    # Iterate over a copy so in-place removal is safe.
    for word in data[:]:
        if word.endswith(','):
            s = s + word
            if not flag:
                # The first comma-terminated token is taken as the country.
                country = country + word
                flag = 1
            data.remove(word)
        # Bug fix: compare by value, not identity ('is'); `word is ''` only
        # worked by virtue of CPython's interning of the empty string.
        if word == '':
            data.remove(word)
    return data, s, country
"vutsuak96@gmail.com"
] | vutsuak96@gmail.com |
10e861e97fb393497526eeecdf883da17d4c4adf | 22bf2e01d86dbb69efffc6e7365ac952f0692ac5 | /tests/s3_spec.py | 3ed4d75f0b5d62cb3d4d95b922aa7159b32dc9b4 | [
"ISC"
] | permissive | uk-gov-mirror/dwp.dataworks-corporate-storage-coalescence | 8da3f141601865de5fd2292834acef5e92a9a618 | 8b1c19874d55dffdd77a86a3a6e83217c4089a09 | refs/heads/master | 2023-03-28T21:34:30.679722 | 2021-04-12T16:45:12 | 2021-04-12T16:45:12 | 356,707,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,585 | py | import unittest
from functools import reduce
from unittest.mock import Mock, MagicMock, call, ANY
from botocore.exceptions import ClientError
from botocore.response import StreamingBody
from utility.s3 import S3, s3_client
class S3Spec(unittest.TestCase):
def setUp(self):
self.bucket = 'bucket'
def test_coalesce(self):
client = self.__client()
s3 = S3(client)
s3.coalesce_batch(self.bucket, self.__batch(), False)
key = "corporate_storage/ucfs_audit/2020/11/05/data/businessAudit/data.businessAudit_2_0_99999.jsonl.gz"
client.upload_fileobj.assert_called_once_with(ANY, self.bucket, key)
def test_no_overwrite(self):
client = self.__client()
s3 = S3(client)
def exists(**kwargs):
if kwargs['Key'].endswith("gz") or kwargs['Key'].endswith("gz.2"):
return MagicMock()
raise MagicMock(ClientError)
client.head_object = Mock(side_effect=exists)
s3.coalesce_batch(self.bucket, self.__batch(), False)
key = "corporate_storage/ucfs_audit/2020/11/05/data/businessAudit/data.businessAudit_2_0_99999.jsonl.gz.3"
client.upload_fileobj.assert_called_once_with(ANY, self.bucket, key)
def test_object_summaries(self):
client = s3_client(True)
s3 = S3(client)
objects = [self.__summaries(x) for x in range(10)]
contents = [x['Contents'] for x in [xs for xs in [ys for ys in objects]]]
expected = reduce(lambda acc, xs: acc + xs, contents)
objects[-1]['IsTruncated'] = False
client.list_objects_v2 = Mock(side_effect=objects)
actual = []
for sub_batch in s3.object_summaries(self.bucket, 'prefix', 5):
actual += sub_batch
self.assertEqual(expected, actual)
def test_batches_are_deleted(self):
batch = [{'object_key': f'prefix/{i}'} for i in range(5)]
deletes = [{'Key': x['object_key']} for x in batch]
calls = [call(Bucket=self.bucket, Delete={'Objects': deletes})]
client = s3_client(True)
client.delete_objects = MagicMock(return_value={})
s3 = S3(client)
s3.delete_batch(self.bucket, batch)
client.delete_objects.assert_has_calls(calls)
def test_batches_are_deleted_in_chunks(self):
batch = [{'object_key': f'prefix/{i}'} for i in range(3500)]
sub_batch1 = [{'Key': f'prefix/{i}'} for i in range(1000)]
sub_batch2 = [{'Key': f'prefix/{i}'} for i in range(1000, 2000)]
sub_batch3 = [{'Key': f'prefix/{i}'} for i in range(2000, 3000)]
sub_batch4 = [{'Key': f'prefix/{i}'} for i in range(3000, 3500)]
calls = [call(Bucket=self.bucket, Delete={'Objects': x}) for x in
[sub_batch1, sub_batch2, sub_batch3, sub_batch4]]
client = s3_client(True)
client.delete_objects = MagicMock(return_value={})
s3 = S3(client)
s3.delete_batch(self.bucket, batch)
client.delete_objects.assert_has_calls(calls)
def __client(self):
objects = [self.__s3_object_with_body(i) for i in range(1000)]
client = s3_client(True)
client.get_object = Mock(side_effect=objects)
client.upload_fileobj = Mock()
return client
def __batch(self):
return [self.__batch_item(i) for i in range(1000)]
def __summaries(self, index: int) -> dict:
return {'IsTruncated': True, 'NextContinuationToken': index, 'Contents': self.__contents(index)}
def __contents(self, index: int) -> list:
return [self.__s3_object(index, x) for x in range(100)]
@staticmethod
def __s3_object(summary_index, content_index):
return {'Key': f"{summary_index}/{content_index}", 'Size': 100}
@staticmethod
def __batch_item(i: int) -> dict:
return {
"object_key": f"corporate_storage/ucfs_audit/2020/11/05/data/businessAudit/"
f"data.businessAudit_2_{i}-190299.jsonl.gz",
"topic": "data.businessAudit",
"partition": 2,
"start_offset": i * 100,
"end_offset": i * 100 + 99,
"size": 10_000
}
def __s3_object_with_body(self, i: int) -> dict:
body = StreamingBody(raw_stream=MagicMock(), content_length=100)
rv = self.__s3_object_contents(i)
body.read = MagicMock(return_value=rv)
return dict(Body=body)
@staticmethod
def __s3_object_contents(i: int) -> bytes:
return f"S3 OBJECT CONTENTS {i}\n".encode()
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | uk-gov-mirror.noreply@github.com |
9266fb8c63b4fbbc68441b1ae6b7faa657d25ca9 | e13a648f8b431ca9d1a1daf0b7f5e7d387e0f1c2 | /w1d2/03_fastcampus.py | 39e3443a8b4f85474df90abb0ccd67275a72e10b | [] | no_license | firstock/data_camp_wcr | 7a1a355929ce822f5c6a748652156850fab841fa | 5724183fb1bac20f9f5ca98b95d67e35a91dab32 | refs/heads/master | 2020-03-08T13:21:17.506470 | 2017-12-09T01:34:47 | 2017-12-09T01:34:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | import requests
from bs4 import BeautifulSoup as bs
# Fetch the Fast Campus "data camp" category page and parse it with lxml.
req = requests.get('http://www.fastcampus.co.kr/category_data_camp/')
html = req.text
soup = bs(html, 'lxml')
# You could spell out the full selector path like this:
# lecture_list = soup.select(
#     '#page-section-6 > div.page-section-content.vc_row-fluid.mk-grid > '
#     'div.mk-padding-wrapper > div > div > '
#     'div > div > div > div > '
#     'div.padding_box > p.line_3'
# )
# ...but it can be shortened to just the tail of the path:
lecture_list = soup.select(
    'div.padding_box > p.line_3'
)
for lecture in lecture_list:
    # <br> tags come through as '\n', so replace each newline with a
    # single space before trimming the result.
    print(lecture.text.replace('\n',' ').strip())
| [
"latheledusjp@gmail.com"
] | latheledusjp@gmail.com |
79d002fabb504c286b851cc736422bbcefc17157 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5634697451274240_1/Python/jbryan/B.py | e9e36e04b058ba7c4625dc7929a20c6bad5aa7f5 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | from math import *
from itertools import accumulate
input_file = open('B-large.in.txt','r')
raw_input = input_file.read()
lines = raw_input.split('\n')
num_cases = int(lines[0])
case_num = 1
output_text = ''
output_file = open('B.txt','w')
debug=0
###################
while case_num<=num_cases:
cakes=lines[case_num]
output_text += ('Case #'+str(case_num)+': ')
if debug: print(cakes)
num_cakes = len(cakes)
orientation = '+'
flips = 0
for i in range(num_cakes-1,-1,-1):
if cakes[i] != orientation:
flips += 1
orientation = cakes[i]
output_text += str(flips)+'\n'
case_num += 1
if debug: print('\n'+output_text)
output_file.write(output_text)
input_file.close()
output_file.close()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
3052b18c72078d3049a7a7493df32914f60f7452 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2908/61519/254691.py | 8524e718a75692e7ed55f02a3cb9d57579194c65 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | num=int(input())
list1=[]
for i in range(0,num):
word=input()
list1.append(word)
for i in range(0,num):
list1[i]=sotred(list1[i])
sorted(list1)
out=1
for i in range(1,num):
if list1[i]!=list1[i-1]:
out=out+1
print(out) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
5c96b58e262196b5db5709bda8f16d0841f77157 | 1abcd4686acf314a044a533d2a541e83da835af7 | /backjoon_level_python/1182.py | 9f82a17a948aac594e6209c5743d6fbbb5330ab1 | [] | no_license | HoYoung1/backjoon-Level | 166061b2801514b697c9ec9013db883929bec77e | f8e49c8d2552f6d62be5fb904c3d6548065c7cb2 | refs/heads/master | 2022-05-01T05:17:11.305204 | 2022-04-30T06:01:45 | 2022-04-30T06:01:45 | 145,084,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | answer = 0
# 29380KB, 728ms
# def dfs1(depth, before_sum, before_idx):
# global answer
#
# if depth != 0 and before_sum == S:
# answer += 1
#
# for idx in range(before_idx, N):
# if visited[idx] is False:
# visited[idx] = True
# dfs(depth + 1, before_sum + numbers[idx], idx+1)
# visited[idx] = False
# 29380KB, 516ms
def dfs2(i, current_sum):
    """Count subsets drawn from numbers[i:] that lift current_sum to S.

    Each non-empty subset is counted exactly once: at the moment its
    highest-index element numbers[i] is about to be included on top of
    the partial sum of its lower-index elements.  Increments the module
    global `answer`; also reads globals N, S and numbers.
    """
    global answer
    if i == N:
        return
    # Taking numbers[i] on top of the current partial sum hits the target.
    if current_sum + numbers[i] == S:
        answer += 1
    dfs2(i+1, current_sum)  # branch: skip numbers[i]
    dfs2(i+1, current_sum + numbers[i])  # branch: take numbers[i]
def solve():
    """Run the subset-sum count from index 0 and return the global answer."""
    # dfs1(0, 0, 0)
    dfs2(0, 0)
    return answer
if __name__ == '__main__':
    # Input format: first line "N S", second line the N numbers.
    N, S = map(int, input().split())
    numbers = list(map(int, input().split()))
    visited = [False] * N  # only used by the commented-out dfs1 variant
    print(solve())
"empire1641@gmail.com"
] | empire1641@gmail.com |
eb347de9d326a8d909e87762c69e2c73016485c3 | 1d182c8cf1ce19019e0b1cba4a16ee1a2a49751e | /scripts/cornernet/demo_cornernet.py | 62b247f95a35e38703776622b5d517394d78c8be | [
"MIT"
] | permissive | zxt881108/pytorch-cv | e30ac8638a8819b637c6bbef717f733264229126 | 6f2d1760f12c9a56a3e7b19ba74bc41451ea284c | refs/heads/master | 2020-06-18T18:16:09.741626 | 2019-04-29T14:11:06 | 2019-04-29T14:11:06 | 196,396,348 | 5 | 0 | null | 2019-07-11T13:06:29 | 2019-07-11T13:06:28 | null | UTF-8 | Python | false | false | 2,061 | py | # TODO unfinish
# import os
# import sys
# import argparse
# import cv2
# import numpy as np
# import matplotlib.pyplot as plt
# import torch
#
# cur_path = os.path.dirname(__file__)
# sys.path.insert(0, os.path.join(cur_path, '../..'))
# from model.model_zoo import get_model
# from utils.viz.bbox import plot_bbox
#
# def parse_args():
# parser = argparse.ArgumentParser(description='Demo with CornerNet networks.')
# parser.add_argument('--network', type=str, default='corner_squeeze_hourglass_coco',
# help="CenterNet full network name")
# parser.add_argument('--images', type=str, default=os.path.join(cur_path, '../png/biking.jpg'),
# help='Test demo images.')
# parser.add_argument('--cuda', action='store_true', default=True,
# help='demo with GPU')
# parser.add_argument('--root', type=str, default=os.path.expanduser('~/.torch/models'),
# help='Default pre-trained model root.')
# parser.add_argument('--pretrained', type=str, default='True',
# help='Load weights from previously saved parameters.')
# parser.add_argument('--thresh', type=float, default=0.3,
# help='Threshold of object score when visualize the bboxes.')
# parser.add_argument('--flip-test', action='store_true', default=False,
# help='Using flipping test')
# parser.add_argument('--reg_offset', action='store_true', default=True,
# help='Using regression offset')
# parser.add_argument('--topK', type=int, default=100, help='number of top K results')
# parser.add_argument('--scale', type=float, default=1.0, help='ratio scale')
#
# args = parser.parse_args()
# return args
#
#
# if __name__ == '__main__':
# args = parse_args()
# device = torch.device('cpu')
# if args.cuda:
# device = torch.device('cuda')
#
# image = cv2.imread(args.images)
# net = get_model(args.network, pretrained=True, root=args.root)
#
| [
"tinyshine@yeah.net"
] | tinyshine@yeah.net |
66586acab2e4ceb2137ca2e5476a3bb6afe4a3a8 | 3a6a211ea0d32405497fbd6486c490bb147e25f9 | /third_party/gsutil/gslib/utils/rsync_util.py | fc9bd57cbb3aa86f53f3b9eadda5d7151f6c83f5 | [
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | catapult-project/catapult | e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0 | 53102de187a48ac2cfc241fef54dcbc29c453a8e | refs/heads/main | 2021-05-25T07:37:22.832505 | 2021-05-24T08:01:49 | 2021-05-25T06:07:38 | 33,947,548 | 2,032 | 742 | BSD-3-Clause | 2022-08-26T16:01:18 | 2015-04-14T17:49:05 | HTML | UTF-8 | Python | false | false | 1,885 | py | # -*- coding: utf-8 -*-
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utility structures and methods for rsync functionality."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
class DiffAction(object):
  """Enum class representing possible actions to take for an rsync diff."""
  # Plain string constants (not an enum.Enum); RsyncDiffToApply.diff_action
  # holds one of these values.
  COPY = 'copy'
  REMOVE = 'remove'  # for REMOVE, src_url_str and copy_size are None
  MTIME_SRC_TO_DST = 'mtime_src_to_dst'  # presumably syncs mtime only -- confirm in rsync command
  POSIX_SRC_TO_DST = 'posix_src_to_dst'  # presumably syncs POSIX attrs only -- confirm in rsync command
class RsyncDiffToApply(object):
  """Bundle of everything needed to apply one rsync diff to one object."""

  def __init__(self, src_url_str, dst_url_str, src_posix_attrs, diff_action,
               copy_size):
    """Constructor.

    Args:
      src_url_str: (str or None) Source URL string; None when diff_action
          is REMOVE.
      dst_url_str: (str) Destination URL string.
      src_posix_attrs: (posix_util.POSIXAttributes) Source POSIX attributes.
      diff_action: (DiffAction) The DiffAction to apply.
      copy_size: (int or None) Number of bytes to copy; None when
          diff_action is REMOVE.
    """
    self.diff_action = diff_action
    self.src_url_str = src_url_str
    self.dst_url_str = dst_url_str
    self.src_posix_attrs = src_posix_attrs
    self.copy_size = copy_size
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
b57a86919dbcf8460279490a88f7f13eaf696b48 | 890b089c87b247cb06613530277c04d0eb6e3e91 | /pymusas/taggers/rules/rule.py | e25d66e8ff1ccfb007bfe3e2225b150457e7f594 | [
"Apache-2.0"
] | permissive | UCREL/pymusas | e5978a400f1bbe783dbda5c736fce2dd546f0416 | 2afc3e919f0ffb1de42fda169c18fa757db0307a | refs/heads/main | 2023-09-03T15:52:38.396275 | 2023-08-17T22:34:46 | 2023-08-17T22:34:46 | 405,042,010 | 22 | 8 | Apache-2.0 | 2023-05-10T13:15:59 | 2021-09-10T10:26:50 | Python | UTF-8 | Python | false | false | 1,605 | py | from abc import abstractmethod
from typing import List
from pymusas.base import Serialise
from pymusas.rankers.ranking_meta_data import RankingMetaData
class Rule(Serialise):
    '''
    An **abstract class** that defines the basic method, `__call__`, that is
    required for all :class:`Rule`s.
    A Rule when called, `__call__`, creates a `List` of rules matches for each
    token, whereby each rule matched is defined by the
    :class:`pymusas.rankers.ranking_meta_data.RankingMetaData` object. These
    rules matches per token can then be, optionally, combined with other rule
    matches per token from other :class:`Rule` classes to then be ranked by a
    :class:`pymusas.rankers.lexicon_entry.LexiconEntryRanker`.
    '''
    @abstractmethod
    def __call__(self, tokens: List[str], lemmas: List[str],
                 pos_tags: List[str]) -> List[List[RankingMetaData]]:
        '''
        For each token it returns a `List` of rules matches defined by the
        :class:`pymusas.rankers.ranking_meta_data.RankingMetaData` object.
        Each `List` of `tokens`, `lemmas`, and `pos_tags` are assumed to be of
        equal length.
        # Parameters
        tokens : `List[str]`
            The tokens that are within the text.
        lemmas : `List[str]`
            The lemmas of the tokens.
        pos_tags : `List[str]`
            The Part Of Speech tags of the tokens.
        # Returns
        `List[List[RankingMetaData]]`
        '''
        ... # pragma: no cover
    @abstractmethod
    def __eq__(self, other: object) -> bool:
        '''
        Compares this :class:`Rule` with `other` for equality.
        # Parameters
        other : `object`
            The object to compare against.
        # Returns
        `bool`
        '''
        ... # pragma: no cover
| [
"andrew.p.moore94@gmail.com"
] | andrew.p.moore94@gmail.com |
85e9c06d68f300859300816e96de5d257ba51870 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_scurvy.py | 1f76884d7d5e6a459982bd8304f7acafaa46dd20 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py |
#calss header
class _SCURVY():
def __init__(self,):
self.name = "SCURVY"
self.definitions = [u'an illness of the body tissues that is caused by not having enough vitamin C']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
3584e07d9cf1564c26308f7c88978562513a52d7 | 28badfbfa1e1325ffb9da62e92e0b524e747f8e1 | /48. Rotate Image/48.py | 11c295917af85558d331f3ec875692967abe99c3 | [] | no_license | saransappa/My-leetcode-solutions | b53fab3fc9bcd96ac0bc4bb03eb916820d17584c | 3c5c7a813489877021109b152b190456cdc34de6 | refs/heads/master | 2021-08-16T13:52:33.230832 | 2021-07-30T11:54:06 | 2021-07-30T11:54:06 | 217,449,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
ans = []
for i in range(len(matrix[0])):
k = []
for j in matrix:
k.append(j[i])
k = k[::-1]
ans.append(k)
for i in range(len(matrix)):
matrix[i] = ans[i] | [
"saran.sappa@gmail.com"
] | saran.sappa@gmail.com |
e349b49a91155b446788b8a00c135dc6f791b236 | 7a09af404f29389504742a3d5f1727bfbe562750 | /TrekBot2_WS/build/uuid_msgs/catkin_generated/pkg.installspace.context.pc.py | c171e755d8e4801a98dbf8e963993060f3f8aa61 | [
"MIT"
] | permissive | Rafcin/TrekBot | 4baa2ed93b90920b36adba0b72384ac320d2de01 | d3dc63e6c16a040b16170f143556ef358018b7da | refs/heads/master | 2020-03-30T02:15:35.361254 | 2018-12-14T03:30:25 | 2018-12-14T03:30:25 | 150,622,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context; the literal values were substituted
# into the template by CMake at configure time -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/xavier_ssd/TrekBot/TrekBot2_WS/install/include;/usr/include".split(';') if "/xavier_ssd/TrekBot/TrekBot2_WS/install/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "uuid_msgs"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot2_WS/install"
PROJECT_VERSION = "1.0.6"
| [
"Rafcin.s@gmail.com"
] | Rafcin.s@gmail.com |
c4a83520a803ec589f6f377a1a6858de6f3607d7 | 0129b016055daa1aaa1e9e0911f271fa7b38e27e | /programacao_estruturada/20192_166/Bimestral2_166_20192/Parte_1/Laiane sousa/1questão.py | 32ecce1262bb203d49e56e3d1df54d48d06e2537 | [] | no_license | rogeriosilva-ifpi/teaching-tds-course | 7c43ff17d6677aef7b42071929b3de8361748870 | 771ccdc4dc932d0ef5ce6ba61a02b5ee11920d4c | refs/heads/master | 2022-04-04T01:08:45.157185 | 2020-01-30T19:36:57 | 2020-01-30T19:36:57 | 206,439,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | def programa():
nota1 = input('Digite nota: ')
nota2 = input('Digite nota: ')
nota3 = input('Digite nota:')
nota4 = input('Digite nota: ')
media_escola = input('digite media')
media = (nota1 + nota2 + nota3 + nota4) / 4
if media >= media_escola:
return aprovado
else:
reprovado
programa()
| [
"rogerio.silva@ifpi.edu.br"
] | rogerio.silva@ifpi.edu.br |
e7680f7fe8adcd7d86910a0bf028f65b592e152a | 904b4b7cd6b1709e9aded92737766a3b5a978838 | /mult_naive_corr.py | 64fb0afa069a3f25dfa64338cce179baf4d45034 | [] | no_license | NicolasLagaillardie/Python | 3ec7aa6eb21ffa86fad33060bb53e42cb7957dc9 | a30037d688d8f11a195d7fa611347528c313d71b | refs/heads/master | 2020-03-30T13:48:27.038592 | 2018-10-02T16:54:42 | 2018-10-02T16:54:42 | 151,288,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | def mult_naive_cor(poly1,poly2):
c=[0 for i in range(0,len(poly1)+len(poly2)-1)]
for i in range(0,len(poly1)):
for j in range(0,len(poly2)):
c[i+j]=c[i+j]+poly1[i]*poly2[j]
return c
| [
"lagaillardie.nicolas@live.fr"
] | lagaillardie.nicolas@live.fr |
84078cae2e8fffe1e5f906bfa882e054f145980c | e6d862a9df10dccfa88856cf16951de8e0eeff2b | /VMS/provisioning/python-aiohttp/api_server/controllers/provisioning_controller.py | 48fe6b4ef1c1144406318cebbad7dcd6558f9146 | [] | no_license | AllocateSoftware/API-Stubs | c3de123626f831b2bd37aba25050c01746f5e560 | f19d153f8e9a37c7fb1474a63c92f67fc6c8bdf0 | refs/heads/master | 2022-06-01T07:26:53.264948 | 2020-01-09T13:44:41 | 2020-01-09T13:44:41 | 232,816,845 | 0 | 0 | null | 2022-05-20T21:23:09 | 2020-01-09T13:34:35 | C# | UTF-8 | Python | false | false | 1,073 | py | from typing import List, Dict
from aiohttp import web
from api_server.models.structure_provision import StructureProvision
from api_server.models.user_provision import UserProvision
from api_server import util
async def provision_structure(request: web.Request, id, body) -> web.Response:
    """provision_structure

    Provision the supplied structure.

    :param id: ID of the customer to provision
    :type id: str
    :param body: Structure of the Customer (trust). This call should be considered a 'set' - elements omitted that were present in previous provisionings should be removed.
    :type body: dict | bytes
    """
    # Parse (and implicitly validate) the payload; the parsed model is not
    # otherwise used by this stub yet.
    StructureProvision.from_dict(body)
    return web.Response(status=200)
async def provision_user(request: web.Request, id, body) -> web.Response:
    """provision_user

    Provision the supplied user.

    :param id: ID of the user to provision
    :type id: str
    :param body: Structure of the User
    :type body: dict | bytes
    """
    # Parse (and implicitly validate) the payload; the parsed model is not
    # otherwise used by this stub yet.
    UserProvision.from_dict(body)
    return web.Response(status=200)
| [
"nigel.magnay@gmail.com"
] | nigel.magnay@gmail.com |
86bca72dbf6f5a0ffed15d6f53116cc782bcc563 | b9806499e8ae050fe1be511e19c8e48f441ca0ae | /cache_test.py | da73433cb3f3f9cc46ddd1aafbc4f845148e7f33 | [] | no_license | renato145/sanic-test | 9fc36c3df5a95d87ef6d6ae33f483b58329b99c0 | 0961109fa4413aabc124873bdbf3e8158454d675 | refs/heads/master | 2021-01-12T19:16:54.925620 | 2017-09-15T02:15:58 | 2017-09-15T02:15:58 | 81,355,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | import asyncio
import aioredis
from time import time
from sanic import Sanic
from sanic.response import text
app = Sanic(__name__)
# Both globals are populated by init() once the server's event loop exists.
loop = None
redis = None
async def init(sanic, _loop):
    """Open the shared Redis connection before the server starts serving."""
    global loop
    global redis
    loop = _loop
    redis = await aioredis.create_redis(('localhost', 6379), loop=loop)
@app.route('/')
async def app_root(request):
    # Increment and report the shared 'my-key' hit counter.
    i = await redis.incr('my-key')
    return text('This is chewbacca #%d.\n' % i)
@app.route('/save/<name:string>')
async def save_x(request, name):
    # Append the given name to the Redis 'names' list.
    await redis.rpush('names', name)
    return text('Saved: %s\n' % name)
@app.route('/remove/<name:string>')
async def remove_x(request, name):
    """Remove *name* from the Redis 'names' list.

    Bug fix: the original handler only reported the removal without ever
    touching Redis.  LREM with count=0 deletes every stored occurrence,
    mirroring what save_x pushes.
    """
    await redis.lrem('names', 0, name)
    return text('Removed: %s\n' % name)
app.run(host='0.0.0.0', port=8000, before_start=init, workers=4)#, debug=True)
# curl -X POST -F image=@t.jpg 'http://localhost:8000/get-label' ; echo ""
# curl http://localhost:8000/get-label?url=http://www.freephotosbank.com/photographers/photos1/60/med_556d65fb50a408d93f7141963b542250.jpg; echo"" | [
"renato145@hotmail.com"
] | renato145@hotmail.com |
6ff5e05f78cf6526867df884061e44b5d80a591e | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=93/sched.py | a3740e38f901709b5d26464ba2f889af2c75b968 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | -S 0 -X RUN -Q 0 -L 2 79 250
-S 1 -X RUN -Q 0 -L 2 67 250
-S 0 -X RUN -Q 0 -L 2 61 200
-S 0 -X RUN -Q 0 -L 2 54 175
-S 4 -X RUN -Q 1 -L 1 53 300
-S 4 -X RUN -Q 1 -L 1 46 175
-S 4 -X RUN -Q 1 -L 1 45 200
-S 4 -X RUN -Q 1 -L 1 40 300
-S 3 -X RUN -Q 2 -L 1 38 400
-S 3 -X RUN -Q 2 -L 1 34 150
-S 3 -X RUN -Q 2 -L 1 32 125
-S 3 -X RUN -Q 2 -L 1 32 100
-S 2 -X RUN -Q 3 -L 1 31 250
-S 2 -X RUN -Q 3 -L 1 24 150
-S 2 -X RUN -Q 3 -L 1 21 100
-S 2 -X RUN -Q 3 -L 1 19 175
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
55743a73e88bf0e11f5b2e0fa31f1544b5e2a1de | 8fc2ab3d29a30e603e19b30bb9517928de529167 | /BFS.py | d62fa20aa9eb7a17b0a83d2a8afc67a255daff76 | [] | no_license | rushilchugh/Practise | 35a9861bec6786580dc0a440eb25d78e43cb7bc9 | 98fd593b95dad641bef1d519c6c6ed1daaae630f | refs/heads/master | 2020-03-13T21:14:14.013604 | 2018-04-27T12:23:50 | 2018-04-27T12:23:50 | 131,291,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | __author__ = 'Rushil'
from Code2.graphs import adj_list
def BFS(adj_list, S):
    """Breadth-first search over an adjacency-list graph from source S.

    Args:
        adj_list: mapping of node -> iterable of neighbour nodes (the
            parameter shadows the module-level import of the same name).
        S: the start node.

    Returns:
        (parent, level): parent maps each reached node to its BFS-tree
        predecessor (None for S); level maps each reached node to its
        distance from S in edges.
    """
    parent = {S: None}
    level = {S: 0}
    i = 1
    nodes_to_visit = [S]
    while nodes_to_visit:
        next_nodes = []
        for u in nodes_to_visit:
            for v in adj_list[u]:
                # Idiom fix: membership test on the dict itself rather
                # than `parent.keys()` -- same semantics, clearer and O(1).
                if v not in parent:
                    parent[v] = u
                    level[v] = i
                    next_nodes.append(v)
        nodes_to_visit = next_nodes
        i += 1
    return parent, level
# Demo: run BFS on the sample graph imported from Code2.graphs.
print(BFS(adj_list, "S"))
"noreply@github.com"
] | rushilchugh.noreply@github.com |
82a9dd120753f5851366c83fce712240441c5ca5 | 3fb0ce33f00b96ae3808a32da44de3e887434afb | /.提出一覧/AtCoder/abc182/c/main.py | c11d5278fa03a2153f4e29ca8f88f439d4aa3e09 | [] | no_license | Yukikazari/kyoupuro | ca3d74d8db024b1988cd0ff00bf069ab739783d7 | 343de455c4344dbcfa4524b492f7f6205c9db26f | refs/heads/master | 2023-02-21T01:53:52.403729 | 2021-01-27T03:55:01 | 2021-01-27T03:55:01 | 282,222,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | #!/usr/bin/env python3
#import
#import math
#import numpy as np
#= int(input())
# Minimum number of digit deletions so that the remaining digits of N form a
# multiple of 3 (at least one digit must remain); prints -1 when impossible.
N = input()
# Count the digits of N by their residue modulo 3.
dic = {i: 0 for i in range(3)}
for n in N:
    nn = int(n)
    dic[nn % 3] += 1
# t is the digit sum of N modulo 3 (residue-1 digits contribute 1, residue-2
# digits contribute 2; residue-0 digits contribute nothing).
t = (dic[1] + dic[2] * 2) % 3
if t == 0:
    # Already divisible by 3: nothing to delete.
    print(0)
elif len(N) > 1 and t == 1 and dic[1] > 0:
    # Deleting one residue-1 digit fixes the sum (some digit must remain).
    print(1)
elif len(N) > 1 and t == 2 and dic[2] > 0:
    # Deleting one residue-2 digit fixes the sum.
    print(1)
elif len(N) > 2 and t == 2 and dic[1] > 1:
    # No single deletion works: removing two residue-1 digits also fixes it.
    print(2)
elif len(N) > 2 and t == 1 and dic[2] > 1:
    # No single deletion works: removing two residue-2 digits also fixes it.
    print(2)
else:
    print(-1)
"haya_nanakusa793@yahoo.co.jp"
] | haya_nanakusa793@yahoo.co.jp |
f52d7c5cd77ef629f03ff51153d8f00534c6303e | 562d4bf000dbb66cd7109844c972bfc00ea7224c | /addons-clarico/clarico_customer_carousel/__manifest__.py | fc405b4009c00a937f09251d4ac642c8ca63d450 | [] | no_license | Mohamed33/odoo-efact-11-pos | e9da1d17b38ddfe5b2d0901b3dbadf7a76bd2059 | de38355aea74cdc643a347f7d52e1d287c208ff8 | refs/heads/master | 2023-03-10T15:24:44.052883 | 2021-03-06T13:25:58 | 2021-03-06T13:25:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | {
# Theme information
'name' : 'Clarico Customer Carousel',
'category' : 'Website',
'version' : '1.0',
'summary': 'Showcase your Customers using clarico Customer Carousel',
'description': """""",
# Dependencies
'depends': [
'clarico_carousel','clarico_snippets'
],
# Views
'data': [
'templates/assets.xml',
'templates/customer_carousel_snippet.xml',
'templates/customer_carousel_snippent_option.xml',
'view/res_partner.xml'
],
# Author
'author': 'Emipro Technologies Pvt. Ltd.',
'website': 'http://www.emiprotechnologies.com',
# Technical
'installable': True,
}
| [
"root@vmi414107.contaboserver.net"
] | root@vmi414107.contaboserver.net |
1b4b74da262ce3b8d32231582e15ae363e57dbf0 | 3d8aae8aa43e0fbd8a8cffc4fa2cd67419059d66 | /module_PyQt/tech_lecture_codetorial/ex510/ex516_QDateEdit.py | a3d7f7f22fdec26289d7bcf1ec40d3f62df26fcd | [] | no_license | onitonitonito/k_mooc_reboot | b8273b7e9fa3fc5958bca57c39f2f3a9108964f1 | 68c8c6a94adc99005fb0fc8c38c416f902d37888 | refs/heads/main | 2021-07-21T22:32:26.080330 | 2021-07-04T02:22:08 | 2021-07-04T02:22:08 | 109,581,972 | 0 | 0 | null | 2020-05-05T22:28:26 | 2017-11-05T13:30:03 | Python | UTF-8 | Python | false | false | 1,573 | py | """
# Ex 5.16_QDateEdit.py - SpinBox to Edit Date
http://codetorial.net/pyqt5/widget/qdateedit.html
"""
# https://doc.qt.io/qt-5/qdateedit.html
# ...
# - QCalendarWidget
# - QSpinBox
# - QDoubleSpinBox
# - QDateEdit *
# - QTimeEdit
# - QDateTimeEdit
# - QTextBrowser
# - QTextBrowser (Advanced)
# - QTextEdit
print(__doc__)
import sys
from PyQt5.QtCore import QDate
from PyQt5.QtWidgets import (
QApplication,
QWidget,
QLabel,
QDateEdit,
QVBoxLayout,
)
class MyApp(QWidget):
    """Demo window showing a QDateEdit spin-box limited to a date range."""

    def __init__(self):
        super().__init__()
        # Window chrome settings consumed later by show_basic().
        self.title = 'Ex5.16_QDateEdit - SpinBox to Edit Date'
        self.posXY = (600, 45)
        self.windowSize = (300, 100)
        self.initUI()

    def initUI(self):
        """Build the widget tree: a label above a date spin-box."""
        lbl = QLabel('QDateEdit')

        dateedit = QDateEdit(self)
        dateedit.setDate(QDate.currentDate())
        # Editable dates are restricted to the years 1900-2100.
        dateedit.setMinimumDate(QDate(1900, 1, 1))
        dateedit.setMaximumDate(QDate(2100, 12, 31))
        # dateedit.setDateRange(QDate(1900, 1, 1), QDate(2100, 12, 31))

        vbox = QVBoxLayout()
        vbox.addWidget(lbl)
        vbox.addWidget(dateedit)
        vbox.addStretch()  # push the widgets to the top of the window

        self.setLayout(vbox)
        self.show_basic()

    def show_basic(self):
        """Apply the stored title/geometry and show the window."""
        self.setWindowTitle(self.title)
        self.setGeometry(*self.posXY, *self.windowSize)
        self.show()
if __name__ == '__main__':
    # Standard PyQt bootstrap: create the application, construct the demo
    # window (which shows itself), and run the Qt event loop until exit.
    app = QApplication(sys.argv)
    window = MyApp()
    sys.exit(app.exec_())
| [
"nitt0x0@gmail.com"
] | nitt0x0@gmail.com |
e8967149ac768480655eb91b081ef04ccc4b7543 | 3d7aa41fc75257173cbd89d8999340bfd8e870d8 | /hw3/PLot_Bottom_Frames.py | 63542292bf9059c469a2147c61081e523a377a2c | [] | no_license | nbren12/numerical_methods2 | eeadc6be2b957bada39f59af30ab8a2393c1f2b0 | 5223ced886ed54a49d5860d707012ef2fd71f4e6 | refs/heads/master | 2021-01-10T19:38:45.660122 | 2014-05-13T21:18:20 | 2014-05-13T21:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | # Python 2.7.x file for Assignment 3 of Numerical Methods II, Spring 2014
# http://www.math.nyu.edu/faculty/goodman/teaching/NumericalMethodsII2014/index.html
# The author gives permission for anyone to use this publically posted
# code for any purpose. The code was written for teaching, not research
# or commercial use. It has not been tested thoroughly and probably has
# serious bugs. Results may be inaccurate, incorrect, or just wrong.
# File: PlotFrames.py
# Make frames for a movie from data from a Python formatted plot file
import runOutput as ro
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import string as st
# Copy the data elements to internal python variables with the same names.
data = ro.RunData()
L = data['L'] # The length of the interval
tf = data['tf'] # The time between frames
fMin = data['fMin'] # The smallest u value, for plotting
fMax = data['fMax'] # The largest u value, for plotting
runString = data['runString'] # A string that describes the run
frames = data['frames'] # The curves to be plotted
nf, nx = frames.shape
print 'nx = %i, nf = %i'%(nx, nf)

PI = np.pi
xm = np.linspace(0, L, nx, endpoint=False)
bbar = .5
# Bottom profile: flat at -1 with a sinusoidal bump of height bbar confined to
# the central strip 9L/20 < x < 11L/20 (the boolean factors mask the sine).
bottom = -1.0+ bbar * ( xm > 9.0/20.0 * L) * (xm < 11.0/20.0 * L) * np.sin( 4 * PI / L * ( xm - 9.0/20.0 * L ) );

# Create initial plot
fig, ax = plt.subplots(1)
curve2, = ax.plot(xm, bottom, linewidth=1.0, color= 'b', alpha=.5) # static bottom profile
curve1, = ax.plot(xm, frames[0,:], linewidth=5.0, color= 'k', alpha=.5) # animated wave curve
plt.axis([0., L, fMin, fMax])
plt.grid(axis='both')
frame = 0
textString = 'frame %i, elapsed time: %f'%(frame, frame*tf)
textbox= plt.text(.1*L, .9*fMax, textString)
plt.title(runString)
def updatefig(frame):
    """
    Will pass this function to update plot to FuncAnimation.

    Redraws the wave curve for the given frame index and refreshes the
    on-plot frame/time caption.  Relies on the module-level curve1,
    frames, tf and textbox objects created above.
    """
    print frame
    curve1.set_ydata(frames[frame,:])
    # NOTE(review): FrameFileName is built but never used (no savefig here);
    # presumably left over from a per-frame export variant.
    FrameFileName = "WaveMovieFrames/frame%i.png"%frame
    textString = 'frame %i, elapsed time: %f'%(frame, frame*tf)
    textbox.set_text(textString)
    # FuncAnimation with blit=True expects the sequence of changed artists.
    return curve1,
# Render and save a single still of frame 50, then build the full movie.
updatefig(50)
plt.savefig('bottom_frame50.png')

ani = animation.FuncAnimation(fig, updatefig, xrange(nf), interval=75, blit=True)
ani.save('WaveMovie_bottom.mp4')
| [
"nbren12@gmail.com"
] | nbren12@gmail.com |
588262143c5c81fa4413ec3ec3571683457ed78c | d063684dd03293eb0f980568af088d26ab087dbe | /debadmin/migrations/0104_states.py | 3db4e98a7fdda2ed70782791263b560a2d5237cd | [] | no_license | abhaysantra/debscientific | ce88e5ef44da8d6771c3652ed0ad02900ccd8ed2 | 88ec65616fd24052bbdbba8b00beba85493f5aea | refs/heads/master | 2020-11-26T22:09:33.820247 | 2019-12-20T07:58:43 | 2019-12-20T07:58:43 | 229,213,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # Generated by Django 2.2.6 on 2019-12-09 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the `states` table, referencing a country by a plain integer id."""

    dependencies = [
        # Must run after the countries migration has created its table.
        ('debadmin', '0103_countries'),
    ]

    operations = [
        migrations.CreateModel(
            name='states',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, null=True)),
                # NOTE(review): a bare IntegerField, not a ForeignKey, so no
                # database-level constraint ties this to the countries table.
                ('country_id', models.IntegerField(null=True)),
            ],
        ),
    ]
| [
"abhay.santra@gmail.com"
] | abhay.santra@gmail.com |
f2d6047367dda9b27db1c899a7cadc60f883e2f6 | 6cf82bbb8c17bdede1951856494949486aead76c | /Chapter_04/src/04_stringslice.py | 71038a43f6e1835ce465ff75ec748028a68f86b3 | [] | no_license | archeranimesh/DiveIntoPython03 | db8449c319e8dcbf0770d8514c4a3bb1f7f7c042 | 8753427fca99db130d76131800480b50c2bc25c8 | refs/heads/master | 2021-01-21T19:39:28.919800 | 2017-06-05T10:03:52 | 2017-06-05T10:03:52 | 92,146,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | a_string = "My alphabet starts where your alphabet ends."
print("a_string:: ", a_string)
print("a_string[3:11]: ", a_string[3:11])
print("a_string[3:-3]: ", a_string[3:-3])
print("a_string[0:2]: ", a_string[0:2])
print("a_string[:18]: ", a_string[:18])
print("a_string[18:]: ", a_string[18:])
| [
"animeshb@archerimagine.com"
] | animeshb@archerimagine.com |
99deb50dd9a7a80a26654a5d26fabac2f313f656 | e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67 | /azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/web/models/vnet_info.py | 9df73b4a463b71c766423675621007237a5f1a1d | [
"MIT"
] | permissive | EnjoyLifeFund/macHighSierra-cellars | 59051e496ed0e68d14e0d5d91367a2c92c95e1fb | 49a477d42f081e52f4c5bdd39535156a2df52d09 | refs/heads/master | 2022-12-25T19:28:29.992466 | 2017-10-10T13:00:08 | 2017-10-10T13:00:08 | 96,081,471 | 3 | 1 | null | 2022-12-17T02:26:21 | 2017-07-03T07:17:34 | null | UTF-8 | Python | false | false | 2,406 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VnetInfo(Model):
    """Virtual Network information contract.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param vnet_resource_id: The Virtual Network's resource ID.
    :type vnet_resource_id: str
    :ivar cert_thumbprint: The client certificate thumbprint.
    :vartype cert_thumbprint: str
    :param cert_blob: A certificate file (.cer) blob containing the public key
     of the private key used to authenticate a
     Point-To-Site VPN connection.
    :type cert_blob: str
    :ivar routes: The routes that this Virtual Network connection uses.
    :vartype routes: list of :class:`VnetRoute
     <azure.mgmt.web.models.VnetRoute>`
    :ivar resync_required: <code>true</code> if a resync is required;
     otherwise, <code>false</code>.
    :vartype resync_required: bool
    :param dns_servers: DNS servers to be used by this Virtual Network. This
     should be a comma-separated list of IP addresses.
    :type dns_servers: str
    """

    # Fields the service owns; marked read-only for the serializer.
    _validation = {
        'cert_thumbprint': {'readonly': True},
        'routes': {'readonly': True},
        'resync_required': {'readonly': True},
    }

    # Python attribute -> (wire key, msrest type) mapping used for
    # (de)serialization.
    _attribute_map = {
        'vnet_resource_id': {'key': 'vnetResourceId', 'type': 'str'},
        'cert_thumbprint': {'key': 'certThumbprint', 'type': 'str'},
        'cert_blob': {'key': 'certBlob', 'type': 'str'},
        'routes': {'key': 'routes', 'type': '[VnetRoute]'},
        'resync_required': {'key': 'resyncRequired', 'type': 'bool'},
        'dns_servers': {'key': 'dnsServers', 'type': 'str'},
    }

    def __init__(self, vnet_resource_id=None, cert_blob=None, dns_servers=None):
        """Initialize the contract; server-populated fields start as None."""
        self.vnet_resource_id = vnet_resource_id
        # Read-only per _validation: populated by the server (see docstring).
        self.cert_thumbprint = None
        self.cert_blob = cert_blob
        # Read-only per _validation: populated by the server.
        self.routes = None
        self.resync_required = None
        self.dns_servers = dns_servers
| [
"Raliclo@gmail.com"
] | Raliclo@gmail.com |
bedb3ee139e556e1e6b6f28508af6fbeb04ba3f6 | 901944f407f4a06a4c4027d6139ce21165976857 | /Variational_Inference/correlated_samples/distribution_approx.py | 247232f221b2e90e53bb95997f2ce85e766b00a4 | [] | no_license | chriscremer/Other_Code | a406da1d567d63bf6ef9fd5fbf0a8f177bc60b05 | 7b394fa87523803b3f4536b316df76cc44f8846e | refs/heads/master | 2021-01-17T02:34:56.215047 | 2020-05-26T13:59:05 | 2020-05-26T13:59:05 | 34,680,279 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 7,984 | py |
import numpy as np
import torch
from torch.autograd import Variable
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import math
def plot_isocontours(ax, func, xlimits=[-6, 6], ylimits=[-6, 6],
                        numticks=101, cmap=None, alpha=1., legend=False):
    """Draw contours of exp(func) over a 2-D grid on the current figure.

    func takes a torch Variable of shape [numticks**2, 2] of (x, y) points
    and returns their log-densities, so the contours show the density.
    NOTE(review): the list defaults for xlimits/ylimits are mutable default
    arguments; harmless here because they are never modified in place.
    """
    x = np.linspace(*xlimits, num=numticks)
    y = np.linspace(*ylimits, num=numticks)
    X, Y = np.meshgrid(x, y)
    # Flatten the grid into an [N, 2] float tensor of (x, y) points.
    aaa = torch.from_numpy(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T).type(torch.FloatTensor)
    bbb = func(Variable(aaa))
    bbb = bbb.data
    # Exponentiate the log-density and reshape back onto the grid.
    zs = torch.exp(bbb)
    Z = zs.view(X.shape)
    Z=Z.numpy()
    cs = plt.contour(X, Y, Z, cmap=cmap, alpha=alpha)
    # if legend:
    #     nm, lbl = cs.legend_elements()
    #     plt.legend(nm, lbl, fontsize=4, bbox_to_anchor=(0.7, 0.1))
    ax.set_yticks([])
    ax.set_xticks([])
    plt.gca().set_aspect('equal', adjustable='box')
def lognormal4_2(x, mean, logvar):
    '''
    Log-density of a diagonal Gaussian evaluated row-wise.

    x: [B,X]
    mean,logvar: [X]
    output: [B]
    '''
    dim = x.size()[1]
    # Normalisation constant D*log(2*pi), kept as a 1-element tensor.
    log_two_pi = torch.log(torch.FloatTensor([2. * math.pi]))
    const = Variable(dim * log_two_pi)
    # Quadratic (Mahalanobis-style) term, summed over the feature axis.
    quad = ((x - mean).pow(2) / torch.exp(logvar)).sum(1)
    return -.5 * (const + logvar.sum(0) + quad)
def logpz(z):
    # p(z): function that takes in [B,Z], Outputs: [B,1]
    # Target density: an equal-weight mixture of two unit-variance 2-D
    # Gaussians, one centred at (2, 2) and one at the origin.
    cccc = lognormal4_2(z, Variable(torch.zeros(2)+2), Variable(torch.zeros(2)))
    # Clamp the log-densities at -30 before exponentiating so exp() cannot
    # underflow to exactly zero and make the final log() return -inf.
    aaa = torch.clamp(cccc, min=-30)
    # print aaa
    # bbb = torch.clamp(lognormal4_2(torch.Tensor(z), Variable(torch.zeros(2)), Variable(torch.zeros(2))), min=-30)
    bbb = torch.clamp(lognormal4_2(z, Variable(torch.zeros(2)), Variable(torch.zeros(2))), min=-30)
    # NOTE(review): a log-sum-exp formulation would be numerically safer here.
    return torch.log(.5*torch.exp(aaa) + .5*torch.exp(bbb))
    # return lognormal4_2(z, Variable(torch.zeros(2)+2), Variable(torch.zeros(2)))
# return lognormal4_2(z, Variable(torch.zeros(2)+2), Variable(torch.zeros(2)))
def plot_it(model):
    """Redraw the target density (blue) and the current q (red) as contours."""
    plt.cla()
    ax = plt.subplot2grid((rows,cols), (0,0), frameon=False)
    plot_isocontours(ax, logpz, cmap='Blues')
    # q(z): diagonal Gaussian with the model's current mean/logvar parameters.
    func = lambda zs: lognormal4_2(zs, model.mean, model.logvar)
    plot_isocontours(ax, func, cmap='Reds')
    plt.draw()
    plt.pause(1.0/30.0)  # brief pause so the GUI event loop can repaint
def train(model,
            path_to_load_variables='', path_to_save_variables='',
            epochs=10, batch_size=20, display_epoch=2, k=1):
    """Fit the variational model by maximising its (importance-weighted) ELBO.

    model: module exposing .params (list of tensors) and forward(k) ->
        (elbo, logpz, logqz).
    path_to_load_variables / path_to_save_variables: optional checkpoint
        paths; an empty string disables loading/saving.
    epochs: number of optimiser steps (one forward/backward per epoch).
    batch_size: unused here; kept for interface symmetry with test().
    display_epoch: print/plot progress every this many epochs.
    k: number of importance samples passed to the model.
    """
    if path_to_load_variables != '':
        model.load_state_dict(torch.load(path_to_load_variables))
        print 'loaded variables ' + path_to_load_variables

    optimizer = optim.Adam(model.params, lr=.05)

    for epoch in range(1, epochs + 1):

        optimizer.zero_grad()

        elbo, logpz, logqz = model.forward(k=k)
        # Gradient *ascent* on the ELBO == descent on its negation.
        loss = -(elbo)

        loss.backward()
        optimizer.step()

        if epoch%display_epoch==0:
            print 'Train Epoch: {}/{}'.format(epoch, epochs), \
                'Loss:{:.4f}'.format(loss.data[0]), \
                'logpz:{:.4f}'.format(logpz.data[0]), \
                'logqz:{:.4f}'.format(logqz.data[0]), \
                'test', test(model,k=k)

            plot_it(model)
            # plt.savefig(home+'/Documents/tmp/thing'+str(epoch)+'.png')
            # print 'Saved fig'

            # print model.linear_transform.data, model.bias_transform.data, model.logvar2.data
            # print model.mean1, model.logvar1

    if path_to_save_variables != '':
        torch.save(model.state_dict(), path_to_save_variables)
        print 'Saved variables to ' + path_to_save_variables
def test(model, path_to_load_variables='', batch_size=50, display_epoch=4, k=10):
    """Estimate the model's ELBO by averaging batch_size independent evaluations."""
    if path_to_load_variables != '':
        model.load_state_dict(torch.load(path_to_load_variables))
        print 'loaded variables ' + path_to_load_variables

    elbos = []
    data_index= 0  # NOTE(review): vestigial; never advanced (batching is commented out)
    for i in range(batch_size):
        # batch = data_x[data_index:data_index+batch_size]
        # data_index += batch_size

        elbo, logpz, logqz = model(k=k)
        elbos.append(elbo.data[0])

        # if i%display_epoch==0:
        #     print i,len(data_x)/ batch_size, elbo.data[0]

    return np.mean(elbos)
class IWG(nn.Module):
    #Importance Weighted Gaussian
    """Diagonal-Gaussian q(z) trained with an importance-weighted ELBO.

    The approximate posterior is N(mean, diag(exp(logvar))); forward(k)
    draws k samples and returns the IWAE-style k-sample ELBO estimate.
    """

    def __init__(self, dim, logpz):
        super(IWG, self).__init__()

        # Fixed seed so the random initial logvar is reproducible.
        torch.manual_seed(1000)

        self.z_size = dim
        # Deliberately initialised away from the target (mean at -2).
        self.mean = Variable(torch.zeros(self.z_size)-2., requires_grad=True)
        self.logvar = Variable(torch.randn(self.z_size)-3., requires_grad=True)
        self.params = [self.mean, self.logvar]
        self.logpz = logpz  # callable: [B,Z] -> log p(z) of shape [B]

    def sample(self, mu, logvar, k):
        """Draw k reparameterised samples and their log q values."""
        eps = Variable(torch.FloatTensor(k, self.z_size).normal_()) #[P,Z]
        z = eps.mul(torch.exp(.5*logvar)) + mu #[P,Z]
        # detach(): score the samples under q without backpropagating
        # through the density parameters themselves.
        logqz = lognormal4_2(z, mu.detach(), logvar.detach())
        return z, logqz

    def forward(self, k=1):
        """Return (elbo, mean log p(z), mean log q(z)) from k importance samples."""
        z, logqz = self.sample(self.mean, self.logvar, k=k) #[P,B,Z],
        logpz = self.logpz(z)
        elbo = logpz - logqz #[P,B]

        if k>1:
            # Log-mean-exp over the k samples, stabilised by the max.
            max_ = torch.max(elbo, 0)[0] #[B]
            elbo = torch.log(torch.mean(torch.exp(elbo - max_), 0)) + max_ #[B]

        elbo = torch.mean(elbo) #[1]
        logpz = torch.mean(logpz)
        logqz = torch.mean(logqz)

        return elbo, logpz, logqz
# class MCGS(nn.Module):
# #Multiple Covaried Gaussian Samples
# def __init__(self, dim, logpz):
# super(MCGS, self).__init__()
# torch.manual_seed(1000)
# self.z_size = dim
# self.mean1 = Variable(torch.zeros(self.z_size), requires_grad=True)
# self.logvar1 = Variable(torch.randn(self.z_size)-3., requires_grad=True)
# # self.linear_transform = Variable(torch.zeros(self.z_size, self.z_size), requires_grad=True)
# self.linear_transform = Variable(torch.zeros(self.z_size), requires_grad=True)
# self.bias_transform = Variable(torch.zeros(self.z_size), requires_grad=True)
# self.logvar2 = Variable(torch.randn(self.z_size)-3., requires_grad=True)
# self.params = [self.mean1, self.logvar1, self.linear_transform, self.bias_transform, self.logvar2]
# self.logpz = logpz
# def sample(self, mu, logvar):
# eps = Variable(torch.FloatTensor(1, self.z_size).normal_()) #[1,Z]
# z = eps.mul(torch.exp(.5*logvar)) + mu #[1,Z]
# logqz = lognormal4_2(z, mu.detach(), logvar.detach())
# return z, logqz
# def forward(self, k=1):
# z, logqz = self.sample(self.mean1, self.logvar1) #[1,Z]
# z = torch.squeeze(z) #[Z]
# z2, logqz2 = self.sample((z*self.linear_transform)+self.bias_transform, self.logvar2) #[1,Z]
# #We want z2 under marginal q2
# # z2 = torch.unsqueeze(z2,0)
# # logqz2 = lognormal4_2(z2, (self.mean1*self.linear_transform)+self.bias_transform, self.logvar2 + self.linear_transform.pow(2)/torch.exp(self.logvar1))
# z = torch.unsqueeze(z,0)
# logpz = self.logpz(z)
# logpz2 = self.logpz(z2)
# logpz = logpz + logpz2
# logqz = logqz + logqz2
# elbo = logpz - logqz #[P,B]
# # if k>1:
# # max_ = torch.max(elbo, 0)[0] #[B]
# # elbo = torch.log(torch.mean(torch.exp(elbo - max_), 0)) + max_ #[B]
# elbo = torch.mean(elbo) #[1]
# logpz = torch.mean(logpz)
# logqz = torch.mean(logqz)
# return elbo, logpz, logqz
# Single-panel interactive figure updated live during training.
rows = 1
cols = 1
fig = plt.figure(figsize=(4+cols,4+rows), facecolor='white')
plt.ion()
plt.show(block=False)

# Fit a 2-D importance-weighted Gaussian to the mixture target logpz.
model = IWG(dim=2, logpz=logpz)
# model = MCGS(dim=2, logpz=logpz)

# Empty paths disable checkpoint loading/saving inside train().
path_to_load_variables=''
# path_to_load_variables=home+'/Documents/tmp/pytorch_first.pt'
# path_to_save_variables=home+'/Documents/tmp/pytorch_first.pt'
path_to_save_variables=''

train(model=model,
        path_to_load_variables=path_to_load_variables,
        path_to_save_variables=path_to_save_variables,
        epochs=160000, batch_size=4, display_epoch=1, k=2)
| [
"chris.a.cremer@gmail.com"
] | chris.a.cremer@gmail.com |
a651261ed93cdd2601148a23c41040636ad51445 | c571c0ae641eb5b2e8b87a77be4c8180e21846f6 | /prompt_toolkit/contrib/regular_languages/validation.py | 7dbd4e59f4002c75f7227d38a673fcb939d22ae4 | [] | no_license | RyannDaGreat/rp | e3f5feec809b3bee3fd9d514ac0d0debe02a582e | c269bbf02bec92ef109096c6cd83a9fd90f4250c | refs/heads/master | 2023-08-09T13:27:23.370148 | 2023-08-05T08:36:32 | 2023-08-05T08:36:32 | 225,253,287 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,065 | py | """
Validator for a regular langage.
"""
from __future__ import unicode_literals
from rp.prompt_toolkit.validation import Validator, ValidationError
from rp.prompt_toolkit.document import Document
from .compiler import _CompiledGrammar
__all__ = (
'GrammarValidator',
)
class GrammarValidator(Validator):
    """
    Validator which can be used for validation according to variables in
    the grammar. Each variable can have its own validator.

    :param compiled_grammar: `_CompiledGrammar` instance (as enforced by the
        isinstance assertion in `__init__`).
    :param validators: `dict` mapping variable names of the grammar to the
        `Validator` instances to be used for each variable.
    """
    def __init__(self, compiled_grammar, validators):
        assert isinstance(compiled_grammar, _CompiledGrammar)
        assert isinstance(validators, dict)

        self.compiled_grammar = compiled_grammar
        self.validators = validators

    def validate(self, document):
        """Validate `document` against the grammar and per-variable validators.

        Raises ValidationError either with a cursor position translated back
        into the full input text (when a variable's validator rejects its
        value) or at the end of the input (when the text does not match the
        grammar at all).
        """
        # Parse input document.
        # We use `match`, not `match_prefix`, because for validation, we want
        # the actual, unambiguous interpretation of the input.
        m = self.compiled_grammar.match(document.text)

        if m:
            for v in m.variables():
                validator = self.validators.get(v.varname)

                if validator:
                    # Unescape text.
                    unwrapped_text = self.compiled_grammar.unescape(v.varname, v.value)

                    # Create a document, for the completions API (text/cursor_position)
                    inner_document = Document(unwrapped_text, len(unwrapped_text))

                    try:
                        validator.validate(inner_document)
                    except ValidationError as e:
                        # Shift the cursor offset from the variable-local
                        # position to the full-document position.
                        raise ValidationError(
                            cursor_position=v.start + e.cursor_position,
                            message=e.message)
        else:
            raise ValidationError(cursor_position=len(document.text),
                                  message='Invalid command')
| [
"sqrtryan@gmail.com"
] | sqrtryan@gmail.com |
187cb0f9510dcae98801879866fdeac69464ef56 | 493c81d7ab8cec7b5d5db5e4b0ecc8684d582f02 | /[03]신경망/01_activation function/01_step-function.py | 5eaff997904914eb37f54ef58a430f348d89d4b2 | [] | no_license | PresentJay/Deep-Learning-from-Scratch | 874b55069d338070f1e9f79233c1c4105839ef99 | 5a5f37f9fbd185c6770e0656f72b69c2ffb0b959 | refs/heads/master | 2023-02-10T15:40:17.776379 | 2021-01-12T11:58:45 | 2021-01-12T11:58:45 | 326,636,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | import numpy as np
import matplotlib.pylab as plt
def step_function(x):
    """Element-wise step (Heaviside-style) function for numpy arrays.

    Returns an integer array with 1 where x > 0 and 0 elsewhere.
    A scalar implementation such as ``if x > 0: return 1 else: return 0``
    cannot handle numpy arrays, so the comparison is done with numpy's
    vectorised ``>`` operator, which evaluates every element at once.
    """
    # Boolean array: True for each element that is strictly positive.
    y = x > 0
    # astype() converts the boolean array to integers (True -> 1, False -> 0).
    # The original code used np.int, a deprecated alias removed in NumPy 1.24;
    # the builtin int is the supported equivalent for astype().
    return y.astype(int)
if __name__ == "__main__":
    x = np.arange(-5.0, 5.0, 0.1)
    # arange generates a numpy array running from the 1st argument up to
    # (but excluding) the 2nd argument, stepping by the 3rd argument,
    # so it generates [-5.0, -4.9, -4.8, ... , 4.8, 4.9]
    y = step_function(x)
    plt.plot(x, y)
    # set graph's parameter
    plt.ylim(-0.1, 1.1)
    # set limit of y axis
    plt.show()
    # show graph(plot)
"presentj94@gmail.com"
] | presentj94@gmail.com |
b6af520251bdbeabdc1142f4b453573dfd5fe4c3 | ea731d7a1d51da390452bee08f1c941ca8fab432 | /backend/base/serializers.py | 32b4aa8caff736e9f6693d6f54480344f0f32881 | [
"MIT"
] | permissive | omar115/ecommerce-django-react | 007575905af2c78880e4ccd71026572907f82481 | 551b99dcdc9c014b1f9bc76555c0463116b61975 | refs/heads/main | 2023-04-07T12:52:07.607948 | 2021-04-05T19:58:23 | 2021-04-05T19:58:23 | 346,705,168 | 0 | 0 | MIT | 2021-04-06T22:16:44 | 2021-03-11T13:07:32 | Python | UTF-8 | Python | false | false | 375 | py | from django.db import models
from django.db.models import fields
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Product
class ProductSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Product model."""

    class Meta:
        model = Product #Product model to serialize
        fields = '__all__' #will return everything/every products
| [
"omarhasan115@gmail.com"
] | omarhasan115@gmail.com |
8091a505803238ba469903f562743ed60ffd2652 | 2ffc147f67ab5dab745b4a33c55c515f456dcffd | /planegeometry/structures/quadtree.py | 3bb2bc3644649429478e6f94dcdd8b6cb40ba0da | [
"BSD-3-Clause"
] | permissive | ufkapano/planegeometry | f2cc637ab53085adeeb38e5dd54697cc1a809bd9 | 93417f2de3ec1694b5a63b1d77b96138bf8db20d | refs/heads/master | 2023-05-28T05:31:58.674598 | 2023-04-29T07:57:32 | 2023-04-29T07:57:32 | 223,356,276 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,692 | py | #!/usr/bin/env python3
class QuadTree:
    """The class defining a quadtree.

    Points are stored in every node (up to `capacity` per node before it
    subdivides), not only in the leaves.

    https://en.wikipedia.org/wiki/Quadtree
    """

    def __init__(self, rect, capacity=4):
        """Make a quadtree."""
        self.rect = rect   # area for points
        self.capacity = capacity
        self.point_list = []
        # If len(point_list) <= capacity, then there are no children.
        self.top_left = None
        self.top_right = None
        self.bottom_left = None
        self.bottom_right = None

    def __str__(self):
        """String representation of a quadtree."""
        return "QuadTree({}, {})".format(self.rect, self.capacity)

    def _is_divided(self):
        """Test if the quadtree is divided."""
        # The four children are always created together in _subdivide(),
        # so checking one of them is enough.
        return self.top_left is not None

    def height(self):
        """Return the height of the quadtree."""
        if self._is_divided():
            tl = self.top_left.height()
            tr = self.top_right.height()
            bl = self.bottom_left.height()
            br = self.bottom_right.height()
            return 1 + max(tl, tr, bl, br)
        else:
            return 1

    def insert(self, point):
        """Insert a point into the quadtree.

        Returns True on success and False when the point lies outside this
        node's rectangle.
        """
        # Ignore points that do not belong in this quadtree.
        if point not in self.rect:
            return False
        # If there is space in this quadtree and if doesn't have subdivisions,
        # add the point here.
        if len(self.point_list) < self.capacity and not self._is_divided():
            self.point_list.append(point)
            return True
        # Otherwise, subdivide and then add the point to whichever node
        # will accept it.
        if not self._is_divided():
            self._subdivide()
        # We have to add the points contained into this quad array
        # to the new quads if we want that only the last node holds the point.
        # In other words, the points from point_list could be pushed down
        # into the subtrees here; then only the leaves would contain points.
        # while self.point_list: # option: moving points to leafs
        #     self.insert(self.point_list.pop())
        if self.top_left.insert(point):
            return True
        if self.top_right.insert(point):
            return True
        if self.bottom_left.insert(point):
            return True
        if self.bottom_right.insert(point):
            return True

    def _subdivide(self):
        """Subdividing the currect rect."""
        # Divide rect on 4 equal parts.
        tl, tr, bl, br = self.rect.make4()
        self.top_left = QuadTree(tl, self.capacity)
        self.top_right = QuadTree(tr, self.capacity)
        self.bottom_left = QuadTree(bl, self.capacity)
        self.bottom_right = QuadTree(br, self.capacity)

    def query(self, query_rect):
        """Find all points that appear within a range."""
        points_in_rect = []
        # intersection() raises ValueError when the rectangles are disjoint,
        # in which case this whole subtree can be skipped.
        try:
            self.rect.intersection(query_rect)
        except ValueError:
            return []
        # Check points at this quadtree level.
        for pt in self.point_list:
            if pt in query_rect:
                points_in_rect.append(pt)
        # Terminate here, if there are no children.
        if not self._is_divided():
            return points_in_rect
        # Otherwise, add the points from the children.
        children = (self.top_left, self.top_right,
                    self.bottom_left, self.bottom_right)
        for child in children:
            points_in_rect.extend(child.query(query_rect))
        return points_in_rect

    def nearest(self, point, best=None):
        """Find a nearest point."""
        # best is the best candidate found so far.
        # Searching for the point nearest to the given one.
        # http://bl.ocks.org/patricksurry/6478178
        # If best was not given, we are at the top level and there must be
        # some points in this area.
        # Note that this assumes points live not only in the leaves but
        # also in the inner nodes.
        if best is None:
            best = self.point_list[0]
        distance = (point-best).length()
        # Skip this area entirely if point is farther from it than the
        # current best distance.
        if (point.x < self.rect.pt1.x - distance or
            point.x > self.rect.pt2.x + distance or
            point.y < self.rect.pt1.y - distance or
            point.y > self.rect.pt2.y + distance):
            return best
        # Check the points stored in this node.
        for pt in self.point_list:
            new_distance = (point-pt).length()
            if new_distance < distance:
                best = pt
                distance = new_distance
        # Terminate here, if there are no children.
        if not self._is_divided():
            return best
        # Otherwise, check the children.
        # Finding the best children ordering for searching.
        c = self.rect.center()
        if point.x > c.x: # right, left
            if point.y > c.y: # top, bottom
                children = (self.top_right, self.top_left,
                            self.bottom_right, self.bottom_left)
            else: # bottom, top
                children = (self.bottom_right, self.bottom_left,
                            self.top_right, self.top_left)
        else: # left, right
            if point.y > c.y: # top, bottom
                children = (self.top_left, self.top_right,
                            self.bottom_left, self.bottom_right)
            else: # bottom, top
                children = (self.bottom_left, self.bottom_right,
                            self.top_left, self.top_right)
        for child in children:
            best = child.nearest(point, best)
        return best
# EOF
| [
"andrzej.kapanowski@uj.edu.pl"
] | andrzej.kapanowski@uj.edu.pl |
1a67ab4b5d557d027babb9feb13a7571a4ebfa74 | bfd2e76748334eaabcd00b984eb91bfe884f76b2 | /tests/testPixelTransformer.py | 6a5b2c6595c28a4b4449c8ce6acd8e17b3e0c67e | [] | no_license | rmjarvis/sims_coordUtils | 52ae427bcee66a3bacd3f047c67ea97b54b7ea2a | 567e106573fa4239a4435782a881f3786cc9daf0 | refs/heads/master | 2020-03-31T11:38:49.229751 | 2018-10-03T23:19:30 | 2018-10-03T23:19:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | import unittest
import numpy as np
import lsst.utils.tests
from lsst.sims.coordUtils import DMtoCameraPixelTransformer
from lsst.sims.coordUtils import lsst_camera
from lsst.sims.coordUtils import pupilCoordsFromPixelCoords
from lsst.afw.cameraGeom import FOCAL_PLANE, PIXELS
def setup_module(module):
    """Module-level setup hook (pytest/nose convention): init LSST test utils."""
    lsst.utils.tests.init()
class PixelTransformerTestCase(unittest.TestCase):
    """
    This unit test TestCase will exercise the class that transforms between
    DM pixels and Camera Team pixels.

    Recall that their conventions differ in that
    Camera +y = DM +x
    Camera +x = DM -y
    """

    def test_camPixFromDMpix(self):
        """
        test that transformation between Camera Team and DM pixels works
        """
        camera_wrapper = DMtoCameraPixelTransformer()
        # NOTE(review): RandomState() is unseeded, so the sampled pixel
        # coordinates differ between runs — consider seeding for repeatability.
        rng = np.random.RandomState()
        camera = lsst_camera()
        npts = 200
        for det in camera:
            det_name = det.getName()
            cam_x_in = rng.random_sample(npts)*4000.0
            cam_y_in = rng.random_sample(npts)*4000.0
            # Round-trip: Camera -> DM -> Camera must recover the inputs.
            dm_x, dm_y = camera_wrapper.dmPixFromCameraPix(cam_x_in, cam_y_in, det_name)
            cam_x, cam_y = camera_wrapper.cameraPixFromDMPix(dm_x, dm_y, det_name)
            np.testing.assert_array_almost_equal(cam_x_in, cam_x, decimal=10)
            np.testing.assert_array_almost_equal(cam_y_in, cam_y, decimal=10)

            center_point = camera[det_name].getCenter(FOCAL_PLANE)
            pixel_system = camera[det_name].makeCameraSys(PIXELS)
            center_pix = camera.transform(center_point, FOCAL_PLANE, pixel_system)

            # test that DM and Camera Team pixels are correctly rotated
            # with respect to each other
            np.testing.assert_allclose(dm_x-center_pix.getX(),
                                       cam_y-center_pix.getX(),
                                       atol=1.0e-10, rtol=0.0)
            np.testing.assert_allclose(dm_y-center_pix.getY(),
                                       center_pix.getY()-cam_x,
                                       atol=1.0e-10, rtol=0.0)

        # Drop the cached camera singleton so later tests rebuild it fresh.
        del camera_wrapper
        del lsst_camera._lsst_camera
class MemoryTestClass(lsst.utils.tests.MemoryTestCase):
pass
if __name__ == "__main__":
lsst.utils.tests.init()
unittest.main()
| [
"scott.f.daniel@gmail.com"
] | scott.f.daniel@gmail.com |
2e3afb6b03f2e8a1cfc4eb67adf3e2fe3bca3d37 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03993/s616663731.py | edeacf223eb393ec2250d3db413f2e353a68d1d3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | import numpy as np
# Read n and a list of n 1-based indices, then count mutual pairs.
n = int(input())
a = list(map(int, input().split()))
# Convert the 1-based indices to 0-based.
a = np.array(a) - 1
ans = 0
# An index i belongs to a mutual pair when a[a[i]] == i; each such pair is
# counted twice (once from each side), hence the final division by 2.
for i in range(n):
    if a[a[i]] == i:
        ans += 1
print(ans // 2)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
53a42978bfc72e7751bf3b8189a7e5db3afc19f5 | 9b78bc34a80e4137da15648b094196bfce144091 | /Pantallas/Calidad/editarTrabajosControlados.py | c6198d8c709b1fc600ad2492389339226213c16a | [] | no_license | AlemFernandez786/Scabox | a3cb3bc16e0bcc95dda7671c19ca9b2fab11247e | 733efd788451b2ce914a5773f71789fbe246d027 | refs/heads/master | 2022-03-02T05:27:39.097097 | 2019-11-07T22:52:33 | 2019-11-07T22:52:33 | 202,450,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,601 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'editarTrabajosControlados.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated UI for the 'delete controlled job' dialog.

    Regenerated from editarTrabajosControlados.ui -- hand edits are lost
    on the next pyuic5 run (see the header warning).
    """
    def setupUi(self, Form):
        """Create and position all widgets on *Form* (534x277 fixed layout)."""
        Form.setObjectName("Form")
        Form.resize(534, 277)
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(20, 30, 501, 41))
        self.label_2.setStyleSheet("font-size:20px;\n"
"")
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.ca_btn_volver = QtWidgets.QPushButton(Form)
        self.ca_btn_volver.setGeometry(QtCore.QRect(290, 200, 131, 31))
        self.ca_btn_volver.setStyleSheet("background-color:#5f5fff;\n"
"color:white;\n"
"font-size:10pt;\n"
"border:none;")
        self.ca_btn_volver.setObjectName("ca_btn_volver")
        self.ca_btn_eliminar = QtWidgets.QPushButton(Form)
        self.ca_btn_eliminar.setGeometry(QtCore.QRect(130, 200, 131, 31))
        self.ca_btn_eliminar.setStyleSheet("color:white;\n"
"font-size:10pt;\n"
"border:none;\n"
"background-color:#ff4e4e;")
        self.ca_btn_eliminar.setObjectName("ca_btn_eliminar")
        self.ca_input_1 = QtWidgets.QLineEdit(Form)
        self.ca_input_1.setGeometry(QtCore.QRect(220, 120, 251, 31))
        self.ca_input_1.setStyleSheet("border:none;\n"
"background-color: rgb(255, 255, 255);\n"
"font: 11pt \"MS Shell Dlg 2\";")
        self.ca_input_1.setText("")
        self.ca_input_1.setObjectName("ca_input_1")
        self.label_4 = QtWidgets.QLabel(Form)
        self.label_4.setGeometry(QtCore.QRect(40, 120, 151, 31))
        self.label_4.setStyleSheet("font: 11pt \"MS Shell Dlg 2\";\n"
"text-align:center;")
        self.label_4.setObjectName("label_4")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Install the (Spanish) user-visible strings via Qt's translate hook."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Trabajos controlados"))
        self.label_2.setText(_translate("Form", "Eliminar Trabajo Controlado"))
        self.ca_btn_volver.setText(_translate("Form", "Volver"))
        self.ca_btn_eliminar.setText(_translate("Form", "Eliminar"))
        self.label_4.setText(_translate("Form", "Número de Orden"))
# Manual preview: build a bare QWidget, install the generated UI, show it.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
sys.exit(app.exec_()) | [
"gustavorojas.1403@gmail.com"
] | gustavorojas.1403@gmail.com |
d0e8c346e451f2d2b6b3573672539c8a00f7a4a0 | c804db17b29c5929c87768b7fa6ebb28f937fc36 | /eu113.py | 2ba92551724e93eaa87233d85e67ca78414998e6 | [] | no_license | ishandutta2007/ProjectEuler-2 | a1f07329fd895828e740d0cf71b0b0a4de261b4c | 29265e4e3dccc67d05ef8d6129363cea1705d970 | refs/heads/master | 2023-03-17T03:19:35.220761 | 2019-04-18T13:39:40 | 2019-04-18T13:39:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,882 | py | # ----------------------------------------- Non-bouncy numbers ------------------------------------------------ #
# #
# Working from left-to-right if no digit is exceeded by the digit to its left it is called #
# an increasing number; for example, 134468. #
# #
# Similarly if no digit is exceeded by the digit to its right it is called a decreasing number; #
# for example, 66420. #
# #
# We shall call a positive integer that is neither increasing nor decreasing a "bouncy" number; #
# for example, 155349. #
# #
# As n increases, the proportion of bouncy numbers below n increases such that there are only #
# 12951 numbers below one-million that are not bouncy and only 277032 non-bouncy numbers below 10^10. #
# #
# How many numbers below a googol (10^100) are not bouncy? #
# ------------------------------------------------------------------------------------------------------------- #
import time
def countIncreasingNumbersWithLen(l):
    """Count the l-digit increasing numbers, e.g. 134468.

    A number is increasing when no digit is exceeded by the digit to its
    left; all digits are therefore 1..9.  Iterative bottom-up DP replacing
    the memoized recursion: counts[d] holds how many valid digit strings
    of the current length start with digit d.
    """
    counts = [0] + [1] * 9  # length 1: exactly one number per digit 1..9
    for _ in range(l - 1):
        extended = [0] * 10
        running = 0
        for digit in range(10):
            running += counts[digit]
            extended[digit] = running  # prefix sum: next digit may be <= digit
        counts = extended
    return sum(counts)
def countDecreasingNumbersWithLen(l):
    """Count the l-digit decreasing numbers, e.g. 66420.

    A number is decreasing when no digit is exceeded by the digit to its
    right; trailing zeros are allowed but the leading digit is 1..9.
    Iterative bottom-up DP: counts[d] holds how many valid digit strings
    of the current length start with digit d.
    """
    counts = [0] + [1] * 9  # length 1: a lone 0 is not a valid number
    for _ in range(l - 1):
        extended = [0] * 10
        running = 0
        for digit in range(9, -1, -1):
            running += counts[digit]
            extended[digit] = running  # suffix sum: next digit may be >= digit
        counts = extended
    return sum(counts)
def eu113(max_exponent=100):
    """Count the non-bouncy positive integers below 10**max_exponent.

    A number is non-bouncy when it is increasing or decreasing.  For each
    digit length we add both category counts and subtract the 9 repdigits
    (111, 222, ...) that were counted in both.

    The default of 100 reproduces the original hard-coded googol limit
    (the old code derived the lengths via len(str(10**100)), i.e. 1..100);
    the parameter generalizes the routine to any power-of-ten bound.
    """
    total = 0
    for length in range(1, max_exponent + 1):
        total += countIncreasingNumbersWithLen(length)
        total += countDecreasingNumbersWithLen(length)
        total -= 9  # repdigits of this length appear in both categories
    return total
# BETTER WAY
#count = 0
#for i in range(1,101):
# count += nCr(8+i,i)
# count += nCr(9+i,i)
# count -= 10
#print (count)
if __name__ == "__main__":
    # time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter() is the documented replacement for elapsed-time timing.
    startTime = time.perf_counter()
    print (eu113())
    elapsedTime = time.perf_counter() - startTime
    print ("Time spent in (", __name__, ") is: ", elapsedTime, " sec")
| [
"roee.sefi@gmail.com"
] | roee.sefi@gmail.com |
d004cdebc1cd29616ee134f510b0518b84309611 | df0c4875b45e68c106dd1e2ba397f71a10794327 | /src/pifetcher/data_fetchers/base_data_fetcher.py | 0a6335f0d499280ebf6b6e68c65a7af2f3e268a1 | [
"MIT"
] | permissive | gavinz0228/pifetcher | c28b407cf4965852af67ffe619a55ee90fa49a72 | c8419ae153eefed04e0e8b239cf1a9226fa91c29 | refs/heads/master | 2021-07-04T20:26:41.973408 | 2020-11-22T16:57:38 | 2020-11-22T16:57:38 | 203,682,327 | 1 | 0 | null | 2019-08-24T17:04:59 | 2019-08-22T00:06:58 | Python | UTF-8 | Python | false | false | 3,381 | py | import json
import functools
from abc import ABC, abstractmethod
from os import path, getcwd, get_exec_path
from sys import platform

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

from pifetcher.core import Config
from pifetcher.utilities import DataUtils, SysUtils
# Pick the per-OS keys used to look up the browser binary and the bundled
# chromedriver inside Config.browser.
if platform == 'win32':
    KEY_DRIVER_PATH = 'win-driver_path'
    KEY_BINARY_LOCATION = 'win-binary_location'
elif platform == "darwin":
    KEY_DRIVER_PATH = 'mac-driver_path'
    KEY_BINARY_LOCATION = 'mac-binary_location'
elif platform in ['linux', 'linux2']:
    KEY_DRIVER_PATH = 'linux-driver_path'
    KEY_BINARY_LOCATION = 'linux-binary_location'
# NOTE(review): no else branch -- on any other platform these names stay
# undefined and BaseDataFetcher.__init__ will fail with NameError.
def check_init(func):
    """Guard decorator: require the page DOM to be loaded before *func* runs.

    The wrapped method raises when called before ``load_html_by_url`` has
    set ``self.initialized`` (and with it ``self.dom``).

    Raises:
        Exception: "Dom is used before initializing." when uninitialized.
    """
    @functools.wraps(func)  # preserve the wrapped method's name/docstring
    def wrapper(*args, **kwargs):
        self = args[0]
        if not self.initialized:
            raise Exception("Dom is used before initializing.")
        return func(*args, **kwargs)
    return wrapper
class BaseDataFetcher(ABC):
    """Selenium-backed page fetcher.

    Loads a URL with a headless Chrome driver, parses the page with
    BeautifulSoup, and extracts typed field values driven by a JSON
    config of CSS selectors.
    """
    def __init__(self, config_file_path):
        """Start a Chrome webdriver per Config.browser and load the JSON
        field-extraction config from *config_file_path*."""
        # initialize browser
        options = Options()
        for option in Config.browser['browser_options']:
            options.add_argument(option)
        if path.exists(Config.browser[KEY_BINARY_LOCATION]):
            options.binary_location = Config.browser[KEY_BINARY_LOCATION]
        driver_abs_path = Config.browser[KEY_DRIVER_PATH]
        if not path.exists(Config.browser[KEY_DRIVER_PATH]):
            # Configured path not found: fall back to the driver bundled
            # under the package's drivers/ directory and make it executable.
            driver_abs_path = path.join(
                path.dirname(path.realpath(__file__)), '../',
                'drivers/' + Config.browser[KEY_DRIVER_PATH])
            SysUtils.set_executable_permission(driver_abs_path)
        if not path.exists(driver_abs_path):
            raise Exception(f"driver path {driver_abs_path} cannot be found")
        self.driver = webdriver.Chrome(options=options, executable_path=driver_abs_path)
        # initialize class variable
        self.html_source = None  # raw page source of the last load
        self.dom = None  # BeautifulSoup tree of the last load
        self.initialized = False  # flips True after load_html_by_url()
        self.config = self.load_config(config_file_path)
    def load_html_by_url(self, url):
        """Navigate to *url*, cache its source and parsed DOM; return the raw HTML."""
        self.driver.get(url)
        self.html_source = self.driver.page_source
        self.dom = BeautifulSoup(self.html_source, 'html.parser')
        self.initialized = True
        return self.html_source
    def load_config(self, config_file_path):
        """Load and return the selector config (JSON) from disk."""
        with open(config_file_path, 'r') as json_config:
            return json.load(json_config)
    def parse(self):
        """Extract every configured field from the loaded page.

        Returns a (values_dict, found_any) pair; found_any is True when at
        least one field produced a truthy value.
        """
        return_obj = {}
        parsed_data = False
        for field, val_config in self.config.items():
            value, _ = self.get_value(val_config['selector'], val_config['type'], val_config['attribute'])
            if value:
                parsed_data = True
                return_obj[field] = value
        return return_obj, parsed_data
    def close(self):
        """Shut down the webdriver (quits the whole browser)."""
        self.driver.quit()
    @check_init
    def get_value(self, path, type, attribute):
        """Select one element by CSS *path* and extract *attribute* coerced to *type*.

        Returns (None, error_message) when the selector matches nothing;
        otherwise delegates to DataUtils.extract_by_type_name, which --
        judging from the unpacking in parse() -- presumably also returns a
        (value, error) pair (TODO confirm).  NOTE(review): an attribute
        that is neither ".text" nor truthy falls off the end and yields a
        bare None.
        """
        element = self.dom.select_one(path)
        if not element:
            return None, f"element {path} was not found"
        if attribute == ".text":
            return DataUtils.extract_by_type_name(element.text.strip(), type)
        elif attribute:
            return DataUtils.extract_by_type_name(element[attribute].strip(), type)
# Import-only module: nothing to do when executed as a script.
if __name__ == "__main__":
    pass
| [
"gavinz0228@gmail.com"
] | gavinz0228@gmail.com |
6804348abdbb9c1e0f2db1fb04909833732d0d9b | 3f84f51751c4191bb81c9df7094578461fb12a2d | /AtcoderProblems/ABC/ABC144/A.py | 3f078cee2e43f9833180838fad98b7f603acc1bd | [] | no_license | rikukawamura/atcoder | 7ff49f1bd8534b99d87fe81ef950e1ba77eee8b8 | 09c0cfe3ce25be56d338614a29e996f4106117cd | refs/heads/master | 2023-08-13T21:21:19.058219 | 2021-09-28T10:02:42 | 2021-09-28T10:02:42 | 329,206,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | def int_sp():
return map(int, input().split())
def li_int_sp():
    # Read one line of space-separated integers as a list.
    return list(map(int, input().split()))
def trans_li_int_sp():
    # Read several lines and transpose them into column lists.
    # NOTE(review): relies on a global N being defined by the caller.
    return list(map(list, (zip(*[li_int_sp() for _ in range(N)]))))
import pdb
# ABC144 A: a 1..9 times-table calculator -- print A*B, or -1 when either
# operand is outside the single-digit range.
A, B = int_sp()
if 1<= A <= 9 and 1<= B <= 9:
    print(A*B)
else:
print(-1) | [
"49993650+rikukawamura@users.noreply.github.com"
] | 49993650+rikukawamura@users.noreply.github.com |
0cd50a67a458d0e8860a4e3d42b10ed10e8c8ede | aad9c55e42eeda22bdee2148f0fbc00bf2a3083f | /curly/utils.py | b9a6a16ae55b525e6be717a4ad5e873fa00fcefa | [
"MIT"
] | permissive | 9seconds/curly | 38c7799987bc36eee3413fdad0992432139eb3f5 | b948f96001d25a866d1acdfe91f8bc48684fc1d1 | refs/heads/master | 2020-12-31T00:11:57.940075 | 2017-05-16T07:43:01 | 2017-05-16T07:43:01 | 86,548,765 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,738 | py | # -*- coding: utf-8 -*-
"""A various utilities which are used by Curly."""
import re
import shlex
import textwrap
from curly import exceptions
def make_regexp(pattern):
    """Compile *pattern* into a regular expression object.

    The pattern is dedented first and compiled with the flags Curly uses
    everywhere: MULTILINE, VERBOSE and DOTALL.

    :param str pattern: A pattern to compile into a regular expression.
    :return: Compiled regular expression.
    """
    return re.compile(
        textwrap.dedent(pattern),
        re.MULTILINE | re.VERBOSE | re.DOTALL)
def make_expression(text):
    """Split a tag expression into shell-style tokens.

    The expression part of a tag such as ``{% if something | went |
    through "named pipe" %}`` is lexed with shell rules, so quoted
    phrases stay together.  ``None`` and whitespace-only input yield
    ``[""]`` so callers always receive at least one token.

    :param text: A text to make an expression from.
    :type text: str or None
    :return: The list of parsed tokens.
    :rtype: list[str]
    """
    tokens = shlex.split((text or "").strip())
    return tokens or [""]
def resolve_variable(varname, context):
    """Resolve *varname* within *context*, supporting dot notation.

    A literal lookup is attempted first, so an ambiguous key such as
    ``"roles.admin.1"`` present verbatim in the context wins over the
    nested path ``roles`` -> ``admin`` -> ``1``.  Dotted segments resolve
    both items and attributes, and numeric segments index into sequences
    (see :py:func:`get_item_or_attr`).

    :param str varname: Expression to resolve.
    :param dict context: A dictionary with variables to resolve.
    :return: Resolved value.
    :raises curly.exceptions.CurlyEvaluateNoKeyError: when *varname*
        cannot be resolved within *context*.
    """
    # Literal match beats dotted traversal.
    try:
        return get_item_or_attr(varname, context)
    except exceptions.CurlyEvaluateError:
        pass
    head, dot, rest = varname.partition(".")
    if not dot:
        raise exceptions.CurlyEvaluateNoKeyError(context, varname)
    return resolve_variable(rest, resolve_variable(head, context))
def get_item_or_attr(varname, context):
    """Resolve a literal *varname* in *context* for :py:func:`resolve_variable`.

    Item access is tried first, then attribute access; a digit string that
    matched neither is retried as an integer index.

    :param str varname: Expression to resolve.
    :param dict context: A dictionary with variables to resolve.
    :return: Resolved value.
    :raises curly.exceptions.CurlyEvaluateNoKeyError: when nothing matches.
    """
    try:
        return context[varname]
    except Exception:
        pass
    try:
        return getattr(context, varname)
    except Exception:
        pass
    if isinstance(varname, str) and varname.isdigit():
        return get_item_or_attr(int(varname), context)
    raise exceptions.CurlyEvaluateNoKeyError(context, varname)
| [
"nineseconds@yandex.ru"
] | nineseconds@yandex.ru |
b27fb6b963dea24cc58d5e13a59c6b34dcad9971 | 12ff124f0689134f393714e97849c3d1d44db91c | /Chapter2_面试需要的基础知识/08_二叉树的下一个节点.py | 33c354b09f872350aa6adff0e37fe9e95cd045a7 | [] | no_license | yuanyuanzijin/Offer-in-Python | 8a596150c6bf09db7eb38b0aaedf064385795c55 | b70bdc3855c37d3212f95261778a7eb389a9bded | refs/heads/master | 2020-03-25T06:56:51.724102 | 2018-10-08T14:30:11 | 2018-10-08T14:30:11 | 143,533,492 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | ## 题目描述:给定一个二叉树和其中的一个结点,请找出中序遍历顺序的下一个结点并且返回。注意,树中的结点不仅包含左右子结点,同时包含指向父结点的指针。
## If the node has a right subtree, the answer is the leftmost node of that subtree; otherwise see the next rule.
## If the node has no parent (i.e. it is the root), there is no answer; otherwise see the next rule.
## If the node is the left child of its parent, the answer is its parent; otherwise see the next rule.
## Walk upward level by level until the current node is its parent's left child; the answer is that parent.
class Solution:
    def GetNext(self, pNode):
        """Return the in-order successor of pNode, or None when it is the
        last node.  Nodes expose .left, .right and .next (parent)."""
        # Case 1: a right subtree exists -> successor is its leftmost node.
        if pNode.right:
            succ = pNode.right
            while succ.left:
                succ = succ.left
            return succ
        # Case 2: climb parents until we ascend out of a left child; that
        # parent is the successor.  Reaching the root means none exists.
        node = pNode
        while node.next:
            if node == node.next.left:
                return node.next
            node = node.next
        return None
| [
"jinluyuan@vip.qq.com"
] | jinluyuan@vip.qq.com |
3e785713fdee8ab7744bc783f0f19b46996e77f8 | 220dd5d666b85b716d5762097fb2d320bb8587fd | /sorting_searching/sorted_matrix_search.py | e352e0533bbef86772fb240750ae70fc0cc77025 | [] | no_license | bensenberner/ctci | 011946a335396def198acaa596d2b5902af7b993 | 1e1ba64580ee96392c92aa95904c4751e32d4e30 | refs/heads/master | 2021-01-10T14:09:01.698323 | 2020-09-06T04:17:53 | 2020-09-06T04:17:53 | 45,659,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | """
Naive solution: []
"""
def binary_search(arr, element):
    """Return an index where *element* occurs in the sorted list *arr*,
    or -1 when it is absent.  Classic half-open [lo, hi) bisection."""
    lo, hi = 0, len(arr)
    while lo < hi:
        mid = (lo + hi) // 2
        value = arr[mid]
        if value == element:
            return mid
        if value > element:
            hi = mid
        else:
            lo = mid + 1
    return -1
def find_element(matrix, element):
    """Locate *element* in a matrix whose rows are individually sorted.

    Returns (row_index, col_index), or (-1, -1) when absent.  Each row is
    binary-searched independently: O(rows * log(cols)).
    """
    for row_index, row in enumerate(matrix):
        col_index = binary_search(row, element)
        if col_index >= 0:
            return row_index, col_index
    return -1, -1
| [
"benlerner95@gmail.com"
] | benlerner95@gmail.com |
9eec419e2b91bc07ee71e1d888e944b71026840c | befa2a3f95980606b99c6dd82e44339dbb9f437b | /bluebottle/accounting/migrations/0003_auto_20160523_1525.py | e0c9c759e3dcd867ea9de7495edf48a35a2d5ece | [
"BSD-2-Clause"
] | permissive | jfterpstra/bluebottle | 73a9b863f3a1880f2965e87c4dfc1730c7a6023e | 6812a376a48272fcb03fd5c9ea2ab9a6d4bf0fd8 | refs/heads/develop | 2020-12-25T05:35:31.002404 | 2016-06-15T14:58:11 | 2016-06-15T14:58:11 | 40,589,066 | 0 | 0 | null | 2015-08-12T08:24:29 | 2015-08-12T08:24:29 | null | UTF-8 | Python | false | false | 1,521 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-23 13:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds four nullable ForeignKey columns
    (payout, remote_payment, remote_payout, tenant) to BankTransaction.

    NOTE(review): ``initial = True`` alongside non-trivial dependencies is
    unusual for an AddField migration -- presumably part of a squashed
    initial set; confirm before editing.
    """
    initial = True
    dependencies = [
        ('payouts', '0001_initial'),
        ('accounting', '0002_auto_20160523_1525'),
    ]
    operations = [
        migrations.AddField(
            model_name='banktransaction',
            name='payout',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='payouts.ProjectPayout', verbose_name='Campaign payout'),
        ),
        migrations.AddField(
            model_name='banktransaction',
            name='remote_payment',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='accounting.RemoteDocdataPayment', verbose_name='Docdata payment'),
        ),
        migrations.AddField(
            model_name='banktransaction',
            name='remote_payout',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='accounting.RemoteDocdataPayout', verbose_name='Docdata payout'),
        ),
        migrations.AddField(
            model_name='banktransaction',
            name='tenant',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='accounting.BankTransactionTenant', verbose_name='Tenant'),
        ),
    ]
| [
"ernst@onepercentclub.com"
] | ernst@onepercentclub.com |
9b24d6467ba6e0f25a9254040bc48f7b9cbf0bb1 | 747f759311d404af31c0f80029e88098193f6269 | /extra-addons/account_banking/sepa/__init__.py | e2e843ef101f849fbdd2989df74bf9d0bdd2e8d8 | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import iban
import online
# Package-level aliases: let callers import IBAN/BBAN directly from the
# sepa package instead of reaching into the iban submodule.
IBAN = iban.IBAN
BBAN = iban.BBAN
| [
"geerish@omerp.net"
] | geerish@omerp.net |
958a5930d92ff8bddb9def5002da531735df668a | 8f221959842e81ecb8fbcf134218f0746bbec59f | /venv/Lib/site-packages/cls/__init__.py | 3b2e7e5b73b57ef812df09daa05eff16f614ad17 | [] | no_license | pluto-er/api-test | 989de8c9d48b20d8712f8dfacb4703138b619bc5 | 4d1d9385e40c2a7420076cdb7ad397e2d9942ed0 | refs/heads/master | 2020-06-15T00:29:09.487345 | 2019-07-04T03:39:59 | 2019-07-04T03:39:59 | 195,163,614 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | #!/usr/bin/env python
import inspect
import public
def _validate(cls):
    """Raise ValueError when *cls* is not a class object."""
    if inspect.isclass(cls):
        return
    raise ValueError("%s not a class" % cls)
def _is_attr(value):
    """Return True when *value* is a plain data attribute.

    The previous test only excluded bound methods and properties, but in
    ``cls.__dict__`` methods appear as plain *functions* (``inspect.ismethod``
    is False for them), so functions, classmethods and staticmethods leaked
    into ``attrs()``.  All of those are excluded here as well.
    """
    if inspect.isfunction(value) or inspect.ismethod(value):
        return False
    return not isinstance(value, (property, classmethod, staticmethod))
@public.add
def attrs(cls):
    """Return the sorted names of the class's (non-dunder) data attributes."""
    _validate(cls)
    names = [
        name
        for name, value in cls.__dict__.items()
        if not name.startswith('__') and _is_attr(value)
    ]
    return sorted(names)
@public.add
def properties(cls):
    """Return the sorted names of the class's properties."""
    _validate(cls)
    return sorted(
        name
        for name, value in cls.__dict__.items()
        if isinstance(value, property)
    )
| [
"1357282015@qq.com"
] | 1357282015@qq.com |
02d0c7bf68087f21b801cf714302de48015ea5bd | f83f1c69d9eb58cc58d07fdb43f9a8b2a33d54bb | /Indexingdataframes.py | 1557adec0713580f708e3ef5e452892986bb9f5a | [] | no_license | baibhab007/Python-Pandas-HandsOn | 0b7217beeea003bf50d99b657a9918e6b4f0bc52 | 59a4fe8e91065cadc03a375e724e4629ea16d81c | refs/heads/master | 2020-07-04T01:40:47.912646 | 2019-09-03T14:05:07 | 2019-09-03T14:05:07 | 202,111,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | ####
# Pandas indexing exercises.  The original task descriptions were bare
# prose (a SyntaxError); they are proper comments now.

# Task 1: DatetimeIndex named `dates` covering 1-Sep-2017 .. 15-Sep-2017
# (pd.date_range); print its 3rd element.
import datetime
import numpy as np
import pandas as pd

dates = pd.date_range(start='9/1/2017', end='9/15/2017')
print(dates[2])

# Task 2: convert the date strings into datetime objects with
# pd.to_datetime and print the result.
datelist = ['14-Sep-2017', '9-Sep-2017']
dates_to_be_searched = pd.to_datetime(datelist)
print(dates_to_be_searched)

# Task 3: check which parsed dates are present in `dates` (isin).
print(dates_to_be_searched.isin(dates))

# Task 4: two-level MultiIndex from the parallel arrays; print its levels.
# (The original line carried an unbalanced ')' and never ran, and the
# task asks for the levels, not the index itself.)
arraylist = [['classA']*5 + ['classB']*5, ['s1', 's2', 's3', 's4', 's5']*2]
mi_index = pd.MultiIndex.from_arrays(arraylist)
print(mi_index.levels)
| [
"noreply@github.com"
] | baibhab007.noreply@github.com |
bd1cf2a3db6685468db99c3b091bc8c5c44efa68 | cc6e7f63eaf4b3570771c46fb8b24b88e6e1f59e | /beginner/168/C.py | 8b101fbf2011d9eee0ebc6951865eb0e55a01777 | [] | no_license | kamojiro/atcoderall | 82a39e9be083a01c14445417597bf357e6c854a8 | 973af643c06125f52d302a5bc1d65f07a9414419 | refs/heads/master | 2022-07-12T00:14:38.803239 | 2022-06-23T10:24:54 | 2022-06-23T10:24:54 | 161,755,381 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | #import sys
#input = sys.stdin.readline
from math import pi, cos, sqrt
def main():
    # Hand lengths A (hour) and B (minute), time H hours M minutes.
    A, B, H, M = map( int, input().split())
    # Law of cosines: squared distance between the hand endpoints, using
    # the angle difference in full turns (minute hand M/60; hour hand
    # H/12 + M/720) scaled by 2*pi.
    ans = A**2 + B**2 - 2*A*B*cos((M/60 - (H/12+M/(12*60)))*2*pi)
    print(sqrt(ans))
if __name__ == '__main__':
    main()
| [
"tamagoma002@yahoo.co.jp"
] | tamagoma002@yahoo.co.jp |
f943e3c28c8e7d969268739a41947bd3e94da15f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_creeks.py | cadafd87ceec361778f878ddc50d64aa4bd02bef | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#calss header
class _CREEKS():
def __init__(self,):
self.name = "CREEKS"
self.definitions = creek
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['creek']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
beaf787a79db6257760dc02bc15f62bc7f9118d9 | e638e9fda0e672fa9a414515d0c05a24ab55ad38 | /BestTimeToBuyAndSellStockIV.py | abc489e6bf529489cc1aa57e612c5c0e94a96380 | [] | no_license | zjuzpz/Algorithms | 8d1c7d50429aa5540eb817dc5495a20fc3f11125 | 2df1a58aa9474f2ecec2ee7c45ebf12466181391 | refs/heads/master | 2021-01-21T05:55:48.768728 | 2020-08-04T22:44:08 | 2020-08-04T22:44:08 | 44,586,024 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,319 | py | """
188. Best Time to Buy and Sell Stock IV
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most k transactions.
Note:
You may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
"""
# O(n) ~ O(k * n)
# O(k)
class Solution(object):
    def maxProfit(self, k, prices):
        """
        :type k: int
        :type prices: List[int]
        :rtype: int

        DP over at most k transactions.  best_buy[t] is the best cash
        balance while holding stock during the t-th transaction;
        best_sell[t] is the best balance after closing t transactions.
        When k exceeds len(prices) // 2 the cap is irrelevant, so the
        unlimited-transaction greedy is used instead.
        """
        if k > len(prices) // 2:
            return self.maxProfitInfiniteTimes(prices)
        best_buy = [float("-inf")] * (k + 1)
        best_buy[0] = 0
        best_sell = [0] * (k + 1)
        for day, price in enumerate(prices):
            # Day `day` can have completed at most day // 2 + 1 transactions.
            for t in range(1, min(k, day // 2 + 1) + 1):
                if best_sell[t - 1] - price > best_buy[t]:
                    best_buy[t] = best_sell[t - 1] - price
                if best_buy[t] + price > best_sell[t]:
                    best_sell[t] = best_buy[t] + price
        return best_sell[-1]
    def maxProfitInfiniteTimes(self, prices):
        """Greedy for unlimited transactions: sum every positive day-to-day rise."""
        return sum(
            max(0, today - yesterday)
            for yesterday, today in zip(prices, prices[1:])
        )
# Smoke run on the sample from the problem statement.
if __name__ == "__main__":
    prices = [3, 1, 8, 7, 10, 2, 11, 4, 3, 13]
    print(Solution().maxProfit(3, prices))
| [
"zjuzpz@gmail.com"
] | zjuzpz@gmail.com |
71f8ce55cd50ec35a4503869ac8a39b39ca214df | 224dea99ce9c52abdb9cf11105cbff34e31fbd37 | /openpickle.py | 7220f3f7a0adbe77e349a68cb01ae6c0432569cd | [] | no_license | lwerdna/miscellany | a613e3cbab3e7d7fced213c456a34a247476df98 | 000ada42bfd5304337921c6f7d3779854d9a0890 | refs/heads/master | 2023-08-19T22:12:23.053768 | 2023-08-11T15:25:06 | 2023-08-11T15:25:06 | 67,312,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | #!/usr/bin/env python
import os, sys
import pprint
import pickle
# Pretty-print the contents of the pickle file named on the command line.
fpath = sys.argv[1]
# SECURITY: pickle.load executes arbitrary code from the file -- only
# open pickles from trusted sources.
with open(fpath, 'rb') as f:
    data = pickle.load(f)
pprint.pprint(data)
#print('variable named data holds the loaded pickle')
#breakpoint()
| [
"andrew@vector35.com"
] | andrew@vector35.com |
a1cafd13e6b0e15bc58e92b3afa9fe2fb9719a37 | b5bc72861644c274b75e42374201ea8cdb84c1a2 | /modules/sys_examples/sys/sys/sys-modules-example-1.py | 7630bf296f80b50236aee0d0bacbd34653c21ee6 | [] | no_license | Aadhya-Solution/PythonExample | 737c3ddc9ad5e3d0cde24ac9f366ce2de2fa6cfe | 34bc04570182130ebc13b6c99997c81834ad5f53 | refs/heads/master | 2022-12-18T09:54:30.857011 | 2020-08-24T13:53:59 | 2020-08-24T13:53:59 | 288,183,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # File: sys-modules-example-1.py
'''
The modules dictionary contains all
loaded modules.
The import statement checks this
dictionary before it actually loads
something from disk.
As you can see from the following example,
Python loads quite a bunch of modules
before it hands control over to your script.
'''
import sys
# Python 2 syntax: dump the whole module cache, then one line per entry.
print sys.modules
for k,v in sys.modules.items():
    print k,"-->",v
## ['os.path', 'os', 'exceptions', '__main__', 'ntpath', 'strop', 'nt',
## 'sys', '__builtin__', 'site', 'signal', 'UserDict', 'string', 'stat']
| [
"shashikant.pattar@gmail.com"
] | shashikant.pattar@gmail.com |
77d990e511fcd6dd4bb4f87b988989abd67740d3 | bf0d7c8d987d5fda14208eb9ce70e31c83c25c25 | /c-ex3-skel/classifier.py | 55e0b98453f1206ecdd246801b7879a0acea89c0 | [] | no_license | SummerBigData/SamRepo | 7876e9393c7175e300e175a60c17633c3b23a1bb | fd84ad654370faa48c084349952c2921fde4032d | refs/heads/master | 2020-03-18T05:09:08.787956 | 2018-06-18T17:11:49 | 2018-06-18T17:11:49 | 134,327,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | from numpy import *
from numpy.linalg import solve, pinv, LinAlgError
from scipy.special import expit
from scipy.optimize import minimize
# class for implementing a logistic regression classifier for the given <target> number
class LogRegMulti:
    """One-vs-all logistic-regression skeleton for a single target label.

    Only the data plumbing is provided (bias column, shape bookkeeping,
    label-binarisation hook); every ``*** TODO ***`` body is deliberately
    left for the exercise.
    """
    def __init__(self, X, y, target):
        X = hstack((ones((X.shape[0],1)), X)) # append the bias column
        self.X = X
        self.m, self.n = X.shape[0], X.shape[1]
        self.y = y
        self.target = target
        self.prepro() #
    # convert the label array to 0 if it isn't the target number and 1 if it is
    def prepro(self):
        """ *** TODO *** """
        # write code to preprocess the y array for this classifier
    # return the cost (unregularized) of the model
    def cost(self):
        """ *** TODO *** """
        # implement the cost function
    # return a (n,1) array that is the gradient of the current cost function
    def grad(self):
        """ *** TODO *** """
        # implement the gradient function
        # return a vector of length n
    # computes the hessian of the current model (n x n)
    def hessian(self):
        """ *** TODO *** """
        # implement the hessian function
        # return a n by n matrix
    # update the current weights according to newton's method
    def update(self):
        """ *** TODO *** """
        # when implementing personally, update theta according to Newton's method
    # learn the weights to best classify the data
    def train(self, epsilon=1e-10, max_iter=1000):
        self.theta = zeros((self.n, 1))
        """ *** TODO *** """
        # learn the theta vector either with a personal implementation or scipy
| [
"lerner98@gmail.com"
] | lerner98@gmail.com |
3e06e424d414306886e7c95b237cf84183186642 | 000f57fa43ecf9f5353ca80ced3ad505698dbecb | /imagelib/imagelib/asgi.py | fe0cb3b33757901983427c7a3fe5d37e25d73ef2 | [] | no_license | kamal0072/imagegallary | 846e9ef43f6e0c42c98a4a4ad5cb22faef295936 | 91effde764710fd9bfc31b7dec238d143833e31e | refs/heads/master | 2023-05-02T07:32:35.805297 | 2021-05-21T05:35:05 | 2021-05-21T05:35:05 | 359,541,161 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for imagelib project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Make sure the settings module is configured before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'imagelib.settings')
# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
| [
"hasan.kamaal0072@gmail.com"
] | hasan.kamaal0072@gmail.com |
36a200feb18f15a1297631a73a9889e56a449e68 | b9e5aebb49734ad47825130529bd64e59f690ecf | /chapter_6/glossary.py | 194c60acd268f7bf0ee08f8ad39daefce5e9056f | [] | no_license | mikegirenko/python-learning | dab0f67d990d95035f93720986c84aaf422f7a9f | db9e3f0e3897caf703169d1f14b15a9aa1901161 | refs/heads/master | 2021-07-09T08:03:40.535653 | 2020-08-05T00:13:41 | 2020-08-05T00:13:41 | 169,983,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | programming_glossary = {
'variable': 'place to store a value',
'loop': 'repeat something',
'string': 'a sequence of characters',
'integer': 'number without decimal',
'python': 'awesome language'
}
# Print each glossary entry; f-strings instead of concatenation, output
# is byte-for-byte the same as before.
print(f"Variable is a {programming_glossary['variable']}")
print(f"\nLoop used to {programming_glossary['loop']}")
print(f"\nString is a {programming_glossary['string']}")
print(f"\nInteger is a {programming_glossary['integer']}")
print(f"\nPython is an {programming_glossary['python']}")
| [
"mike.girenko@cybergrx.com"
] | mike.girenko@cybergrx.com |
7bfb37f70af121d1a093c1fcb7a03e3ecf35af5e | f301de68f64e52fc0770518248eafee6a3c25b1f | /breadcrumbs/templatetags/breadcrumbs_tags.py | 0af209fcbd4784382410ad129d06b159cc119228 | [] | no_license | bedna-KU/Shakal-NG | 1f0cf0ec8f8b2b0ab65e6ed4b954c095554df8a0 | c504424545afbe9238d6813962d2a96f7c4889a1 | refs/heads/master | 2020-12-26T03:55:57.579418 | 2015-04-25T15:42:47 | 2015-04-25T15:42:47 | 34,467,383 | 0 | 0 | null | 2015-04-23T16:17:17 | 2015-04-23T16:17:17 | null | UTF-8 | Python | false | false | 904 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django_jinja import library
from jinja2 import contextfunction
@contextfunction
@library.global_function
def breadcrumb(context, contents, *args, **kwargs):
class_name = kwargs.pop('class', False)
url = kwargs.pop('absolute_url', False)
if not url:
url = kwargs.pop('url', False)
if url:
url = reverse(url, args=args, kwargs=kwargs)
breadcrumb_context = {
'contents': contents,
'url': url,
'class': class_name
}
context['breadcrumbs'].append(breadcrumb_context)
return ''
@library.global_function
def render_breadcrumbs(breadcrumbs):
breadcrumbs.reverse()
ctx = {'breadcrumbs': breadcrumbs}
return mark_safe(render_to_string('breadcrumbs/breadcrumbs.html', ctx))
| [
"miroslav.bendik@gmail.com"
] | miroslav.bendik@gmail.com |
c8dac64412d9546a59ff2d093c73b5e63d87cd36 | c548c10c4fd0b6c1d1c10cc645cb3b90b31f2de6 | /keras/keras21_cancer1.py | 496a8c24be1fe94f38d57acbd42a47279eec68f1 | [] | no_license | sswwd95/Study | caf45bc3c8c4301260aaac6608042e53e60210b6 | 3c189090c76a68fb827cf8d6807ee1a5195d2b8b | refs/heads/master | 2023-06-02T21:44:00.518810 | 2021-06-26T03:01:26 | 2021-06-26T03:01:26 | 324,061,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | # 유방암 예측 모델
import numpy as np
from sklearn.datasets import load_breast_cancer
#1. Data
datasets = load_breast_cancer()
print(datasets.DESCR)
print(datasets.feature_names)
x = datasets.data
y = datasets.target
print(x.shape) # (569, 30)
print(y.shape) # (569,)
# print(x[:5])
# print(y)
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
# 80/20 train/test split, then carve a validation set out of the train split.
x_train, x_test, y_train, y_test = train_test_split(
    x, y, train_size = 0.8, random_state = 66)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, 
    test_size=0.2, shuffle=True)
# Fit the scaler on the training split only; apply it to every split.
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)
# Do the preprocessing yourself / minmax, train_test_split
#2. Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(30,)))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='relu'))
# Sigmoid output: single probability for the binary (malignant/benign) label.
model.add(Dense(1, activation='sigmoid'))
# Works fine even without hidden layers.
# 3. Compile, train
# mean_squared_error -> the full name also works
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
model.fit(x_train,y_train, epochs=100, validation_data=(x_val, y_val), batch_size=8)
#4. Evaluate, predict
loss, acc = model.evaluate(x_test,y_test, batch_size=8)
print('loss, acc : ', loss, acc)
y_predict = model.predict(x_test[-5:-1])
# Threshold the sigmoid output at 0.5 to get hard 0/1 class labels.
print(np.where(y_predict<0.5, 0, 1))
print(y_test[-5:-1])
# loss, acc : 0.4048005938529968 0.9736841917037964
# [[1.0000000e+00]
# [3.7346189e-04]
# [9.9999988e-01]
# [9.9999976e-01]]
# [1 0 1 1]
# np.where사용
# [[1]
# [0]
# [1]
# [1]]
# [1 0 1 1]
| [
"sswwd95@gmail.com"
] | sswwd95@gmail.com |
8af6d7f68f38d74e6e2760a33a6b7938a5e73417 | cc0e5613f1532e9922269530057970eb4f320a1a | /setup.py | c7e8c3d5adec9f37717324ddec1ee1bef3a76e11 | [
"Apache-2.0"
] | permissive | Huskyeder/RxPY | 9e353e20f850ce8e031bacafa91187ff2d0d83e4 | 8060b9ef79d2fe6654c0265860af6e8829524131 | refs/heads/master | 2021-01-15T10:18:31.831559 | 2015-04-15T04:34:05 | 2015-04-15T04:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | try:
    from setuptools import setup
except ImportError:
    # setuptools not installed: bootstrap it with the bundled ez_setup
    # helper, then retry the import.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup
setup(
    name='Rx',
    version='1.1.0',
    description='Reactive Extensions (Rx) for Python',
    long_description=("is a library for composing asynchronous and "
        "event-based programs using observable collections and LINQ-style "
        "query operators in Python."),
    author='Dag Brattli',
    author_email='dag@brattli.net',
    license='Apache License',
    url='http://reactivex.io',
    download_url = 'https://github.com/ReactiveX/RxPY',
    zip_safe = True,
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Environment :: Other Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Programming Language :: Python :: Implementation :: IronPython',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    # `python setup.py test` collects tests via nose.
    test_suite='nose.collector',
    # Explicit package list (no find_packages auto-discovery).
    packages=['rx', 'rx.internal',
        'rx.linq', 'rx.linq.observable', 'rx.linq.enumerable',
        'rx.concurrency', 'rx.concurrency.mainloopscheduler', 'rx.joins',
        'rx.linq.observable.blocking', 'rx.disposables', 'rx.subjects',
        'rx.backpressure', 'rx.testing'],
    package_dir={'rx':'rx'}
)
| [
"dag@brattli.net"
] | dag@brattli.net |
884ff4c73b7650a2551a86b231ce63b7b72b186f | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/f780f4ba36130202c1c33c2039f2c1a108f3530d-<tanh>-bug.py | d537e1b6b2a3ad0ec1cea9b31886a0912d12355c | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py |
def tanh(x, name=None):
'Computes hyperbolic tangent of `x` element-wise.\n\n Args:\n x: A Tensor or SparseTensor with type `float`, `double`, `int32`,\n `complex64`, `int64`, or `qint32`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor or SparseTensor respectively with the same type as `x` if\n `x.dtype != qint32` otherwise the return type is `quint8`.\n '
with ops.name_scope(name, 'Tanh', [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_tanh = gen_math_ops._tanh(x.values, name=name)
return sparse_tensor.SparseTensor(indices=x.indices, values=x_tanh, dense_shape=x.dense_shape)
else:
return gen_math_ops._tanh(x, name=name)
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
a38b5e6c0ff87e0c5e46d4090ef5f4b725d3562a | 4acc08d2c165b5d88119df6bb4081bcfaca684f7 | /python_program/find_SI_amount.py | 006ebc10ba141c478ce3b084f1d9dfaaa45ca23f | [] | no_license | xiaotuzixuedaima/PythonProgramDucat | 9059648f070db7304f9aaa45657c8d3df75f3cc2 | 90c6947e6dfa8ebb6c8758735960379a81d88ae3 | refs/heads/master | 2022-01-16T04:13:17.849130 | 2019-02-22T15:43:18 | 2019-02-22T15:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | #22. Simple Interest and ammount find the Given all the Required Values..??
p= float(input("enter the principle"))
r= float(input("enter the rate of interest"))
t= float(input("enter the time"))
SI= (p*r*t)/100
print("simple interest =",SI)
A= p+SI
print("ammount = ",A) | [
"ss7838094755@gmail.com"
] | ss7838094755@gmail.com |
e8c8e2c9083d4608f16657d653d28405842f396e | 46ac0965941d06fde419a6f216db2a653a245dbd | /sdks/python/appcenter_sdk/models/AudienceListResult.py | 1daf495e95204f5fbfbe411354be8909034bc884 | [
"MIT",
"Unlicense"
] | permissive | b3nab/appcenter-sdks | 11f0bab00d020abb30ee951f7656a3d7ed783eac | bcc19c998b5f648a147f0d6a593dd0324e2ab1ea | refs/heads/master | 2022-01-27T15:06:07.202852 | 2019-05-19T00:12:43 | 2019-05-19T00:12:43 | 187,386,747 | 0 | 3 | MIT | 2022-01-22T07:57:59 | 2019-05-18T17:29:21 | Python | UTF-8 | Python | false | false | 3,956 | py | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class AudienceListResult(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    allowed enum values
    """
    Calculating = "Calculating"
    Ready = "Ready"
    Disabled = "Disabled"
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'values': 'array',
        'next_link': 'string'
    }
    attribute_map = {
        'values': 'values',
        'next_link': 'next_link'
    }
    def __init__(self, values=None, next_link=None): # noqa: E501
        """AudienceListResult - a model defined in Swagger""" # noqa: E501
        self._values = None
        self._next_link = None
        self.discriminator = None
        self.values = values
        # next_link is optional; the backing field stays None when absent.
        if next_link is not None:
            self.next_link = next_link
    @property
    def values(self):
        """Gets the values of this AudienceListResult. # noqa: E501
        List of audiences. # noqa: E501
        :return: The values of this AudienceListResult. # noqa: E501
        :rtype: array
        """
        return self._values
    @values.setter
    def values(self, values):
        """Sets the values of this AudienceListResult.
        List of audiences. # noqa: E501
        :param values: The values of this AudienceListResult. # noqa: E501
        :type: array
        """
        # The setter enforces that `values` is always present (non-None).
        if values is None:
            raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501
        self._values = values
    @property
    def next_link(self):
        """Gets the next_link of this AudienceListResult. # noqa: E501
        :return: The next_link of this AudienceListResult. # noqa: E501
        :rtype: string
        """
        return self._next_link
    @next_link.setter
    def next_link(self, next_link):
        """Sets the next_link of this AudienceListResult.
        :param next_link: The next_link of this AudienceListResult. # noqa: E501
        :type: string
        """
        self._next_link = next_link
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialise nested models (anything with a to_dict()).
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 — confirm that is intended.
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AudienceListResult):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"b3nab@users.noreply.github.com"
] | b3nab@users.noreply.github.com |
975cd562124cdf58c34d5b5f082dd109d3e2f569 | 7ba34bf0e690ffbdd47df66a06ae04e321490bd5 | /etsproxy/logger/api.py | 2b3087792313ec60fb3693defa6736aefa2aa11f | [] | no_license | simvisage/etsproxy | f8daf32f598aff241dc90c1d4788e0acdaead266 | 529d33c051856a79796fa3dd593a0d1d778a6ebe | refs/heads/master | 2016-09-03T00:24:17.900544 | 2015-06-11T09:52:38 | 2015-06-11T09:52:38 | 3,874,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | #-------------------------------------------------------------------------------
#
# Copyright (c) 2012
# IMB, RWTH Aachen University,
# ISM, Brno University of Technology
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in the Spirrid top directory "licence.txt" and may be
# redistributed only under the conditions described in the aforementioned
# license.
#
# Thanks for using Simvisage open source!
#
#-------------------------------------------------------------------------------
# ETS 3
try:
from enthought.logger.api import *
# ETS 4
except ImportError:
from apptools.logger.api import *
| [
"matthias@minkowski.lbb.rwth-aachen.de"
] | matthias@minkowski.lbb.rwth-aachen.de |
f94e13dcb6fbc13a6b2d2144841b96d2a2296f0e | a3482e5b922bcc5b8d8fd3dc49a29a3073176191 | /source_py2/python_toolbox/function_anchoring_type.py | 253b395bd217c17dce1c7ed514aaf3f7a7fbc249 | [
"MIT"
] | permissive | apddup/python_toolbox | 4d2079826218255240a27b9b977b3a4fc2045ee3 | 2d336f361122ad4216669b7a3e1d794fa2a76db1 | refs/heads/master | 2021-01-18T17:09:02.879773 | 2013-10-06T18:20:34 | 2013-10-06T18:20:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | # Copyright 2009-2013 Ram Rachum.
# This program is distributed under the MIT license.
'''
Defines the `FunctionAnchoringType` class.
See its documentation for more details.
'''
import sys
import types
from python_toolbox import misc_tools
class FunctionAnchoringType(type):
    '''
    Metaclass that "anchors" a class's plain functions at module level.

    Python can pickle methods, but plain functions that live inside a class
    (e.g. those created by `staticmethod`) fail to unpickle because Python
    looks for them only at module level.  This metaclass therefore plants a
    module-level reference to every non-magic plain function defined on the
    class, so such functions can be pickled.

    This is a hacky workaround, but it seems like the best option until
    Python learns to pickle non-module-level functions.
    '''
    def __new__(mcls, name, bases, namespace_dict):
        new_class = super(FunctionAnchoringType, mcls).__new__(
            mcls, name, bases, namespace_dict)
        # "Getted" vars: descriptors resolved the way attribute access
        # would resolve them, so methods appear as method objects and only
        # plain functions remain `types.FunctionType`.
        getted_vars = misc_tools.getted_vars(new_class)
        for attribute_name, attribute in getted_vars.items():
            if not isinstance(attribute, types.FunctionType):
                continue  # Methods pickle fine; only anchor plain functions.
            if misc_tools.is_magic_variable_name(attribute_name):
                continue
            owning_module = sys.modules[attribute.__module__]
            anchor_name = attribute.__name__
            # Be careful not to clobber an unrelated module-level object
            # that happens to live at the anchor address. (todotest)
            try:
                existing = getattr(owning_module, anchor_name)
            except AttributeError:
                # Nothing at the anchor address — the normal case.
                setattr(owning_module, anchor_name, attribute)
            else:
                if existing is not attribute:
                    raise Exception("An object `%s.%s` already exists! Can't "
                                    "anchor function." % \
                                    (attribute.__module__, anchor_name))
        return new_class
| [
"ram@rachum.com"
] | ram@rachum.com |
a7f8779a37163981738ccc72f6f703aaf56fc54f | 75876aac4ff3466f98188925e9e7d3b8367d1558 | /25-class-metaprog/checked/metaclass/checkedlib.py | 34484acb32c474866335e074c603719b8ee1c51d | [
"MIT"
] | permissive | CrazyGuo/example-code-2e | 88ec45d070036c69501393ab03fee6732b9ae2a2 | e986e3bdc06fd29faf97e06a1c55ba95aba7c728 | refs/heads/master | 2023-06-02T21:10:24.254598 | 2021-06-17T20:07:01 | 2021-06-17T20:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,821 | py | """
A ``Checked`` subclass definition requires that keyword arguments are
used to create an instance, and provides a nice ``__repr__``::
# tag::MOVIE_DEFINITION[]
>>> class Movie(Checked): # <1>
... title: str # <2>
... year: int
... box_office: float
...
>>> movie = Movie(title='The Godfather', year=1972, box_office=137) # <3>
>>> movie.title
'The Godfather'
>>> movie # <4>
Movie(title='The Godfather', year=1972, box_office=137.0)
# end::MOVIE_DEFINITION[]
The type of arguments is runtime checked during instantiation
and when an attribute is set::
# tag::MOVIE_TYPE_VALIDATION[]
>>> blockbuster = Movie(title='Avatar', year=2009, box_office='billions')
Traceback (most recent call last):
...
TypeError: 'billions' is not compatible with box_office:float
>>> movie.year = 'MCMLXXII'
Traceback (most recent call last):
...
TypeError: 'MCMLXXII' is not compatible with year:int
# end::MOVIE_TYPE_VALIDATION[]
Attributes not passed as arguments to the constructor are initialized with
default values::
# tag::MOVIE_DEFAULTS[]
>>> Movie(title='Life of Brian')
Movie(title='Life of Brian', year=0, box_office=0.0)
# end::MOVIE_DEFAULTS[]
Providing extra arguments to the constructor is not allowed::
>>> blockbuster = Movie(title='Avatar', year=2009, box_office=2000,
... director='James Cameron')
Traceback (most recent call last):
...
AttributeError: 'Movie' object has no attribute 'director'
Creating new attributes at runtime is restricted as well::
>>> movie.director = 'Francis Ford Coppola'
Traceback (most recent call last):
...
AttributeError: 'Movie' object has no attribute 'director'
The `_as_dict` instance creates a `dict` from the attributes of a `Movie` object::
>>> movie._asdict()
{'title': 'The Godfather', 'year': 1972, 'box_office': 137.0}
"""
from collections.abc import Callable
from typing import Any, NoReturn, get_type_hints
# tag::CHECKED_FIELD[]
class Field:
def __init__(self, name: str, constructor: Callable) -> None:
if not callable(constructor) or constructor is type(None):
raise TypeError(f'{name!r} type hint must be callable')
self.name = name
self.storage_name = '_' + name # <1>
self.constructor = constructor
def __get__(self, instance, owner=None): # <2>
return getattr(instance, self.storage_name) # <3>
def __set__(self, instance: Any, value: Any) -> None:
if value is ...:
value = self.constructor()
else:
try:
value = self.constructor(value)
except (TypeError, ValueError) as e:
type_name = self.constructor.__name__
msg = f'{value!r} is not compatible with {self.name}:{type_name}'
raise TypeError(msg) from e
setattr(instance, self.storage_name, value) # <4>
# end::CHECKED_FIELD[]
# tag::CHECKED_META[]
class CheckedMeta(type):
def __new__(meta_cls, cls_name, bases, cls_dict): # <1>
if '__slots__' not in cls_dict: # <2>
slots = []
type_hints = cls_dict.get('__annotations__', {}) # <3>
for name, constructor in type_hints.items(): # <4>
field = Field(name, constructor) # <5>
cls_dict[name] = field # <6>
slots.append(field.storage_name) # <7>
cls_dict['__slots__'] = slots # <8>
return super().__new__(
meta_cls, cls_name, bases, cls_dict) # <9>
# end::CHECKED_META[]
# tag::CHECKED_CLASS[]
class Checked(metaclass=CheckedMeta):
__slots__ = () # skip CheckedMeta.__new__ processing
@classmethod
def _fields(cls) -> dict[str, type]:
return get_type_hints(cls)
def __init__(self, **kwargs: Any) -> None:
for name in self._fields():
value = kwargs.pop(name, ...)
setattr(self, name, value)
if kwargs:
self.__flag_unknown_attrs(*kwargs)
def __flag_unknown_attrs(self, *names: str) -> NoReturn:
plural = 's' if len(names) > 1 else ''
extra = ', '.join(f'{name!r}' for name in names)
cls_name = repr(self.__class__.__name__)
raise AttributeError(f'{cls_name} object has no attribute{plural} {extra}')
def _asdict(self) -> dict[str, Any]:
return {
name: getattr(self, name)
for name, attr in self.__class__.__dict__.items()
if isinstance(attr, Field)
}
def __repr__(self) -> str:
kwargs = ', '.join(
f'{key}={value!r}' for key, value in self._asdict().items()
)
return f'{self.__class__.__name__}({kwargs})'
# end::CHECKED_CLASS[]
| [
"luciano@ramalho.org"
] | luciano@ramalho.org |
35319f278e7ff5b9496440f2e6be1e4aaa8e0278 | da052c0bbf811dc4c29a83d1b1bffffd41becaab | /core/signature_management/models/hr_payroll.py | ac481a3635355f80bcf2991db1bef188877435ec | [] | no_license | Muhammad-SF/Test | ef76a45ad28ac8054a4844f5b3826040a222fb6e | 46e15330b5d642053da61754247f3fbf9d02717e | refs/heads/main | 2023-03-13T10:03:50.146152 | 2021-03-07T20:28:36 | 2021-03-07T20:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,594 | py | # -*- coding: utf-8 -*-
from openerp import api, exceptions, fields, models, _
class HrPayslip(models.Model):
    """hr.payslip extension: stamps the creating user's signature at creation
    time and the confirming user's signature/timestamp on confirmation."""
    _inherit = 'hr.payslip'
    # (Disabled) computed variant of the signature assignment done in create().
    # @api.depends('signature')
    # def _compute_signature_image(self):
    #     if self.env.user.main_signature == 'upload_file':
    #         self.signature = self.env.user.upload_datas
    #     else:
    #         self.signature = self.env.user.signature
    # Signature of the user who created the slip (set in create()).
    signature = fields.Binary(
        string='Signature', help='Add signature.')
    # User who confirmed the slip (set in action_payslip_done()).
    confirmed_id = fields.Many2one(
        comodel_name='res.users', string='Confirmed By', help='')
    # Signature image of the confirming user.
    confirmed_signature = fields.Binary(
        string="Confirmed Signature")
    # When the slip was confirmed.
    confirmed_date_time = fields.Datetime(
        string='Confirmed On', help='Confirmed datetime.')
    @api.model
    def create(self,vals):
        # Pick the current user's signature source: an uploaded file or the
        # stored signature field on res.users.
        if self.env.user.main_signature == 'upload_file':
            vals['signature'] = self.env.user.upload_datas
        else:
            vals['signature'] = self.env.user.signature
        return super(HrPayslip, self).create(vals)
    @api.multi
    def action_payslip_done(self):
        # Run the standard confirmation first, then record who confirmed,
        # with which signature, and when.
        res = super(HrPayslip, self).action_payslip_done()
        if self.env.user.main_signature == 'upload_file':
            confirmed_signature = self.env.user.upload_datas
        else:
            confirmed_signature = self.env.user.signature
        self.write({'confirmed_id': self.env.user.id,
                    'confirmed_signature': confirmed_signature,
                    'confirmed_date_time': fields.Datetime.now(),
                    })
        return res
HrPayslip()
| [
"jbalu2801@gmail.com"
] | jbalu2801@gmail.com |
2da3dba6b1e41e7d8bc3bb83e31b842809e33515 | b5cdfaccd2d1a96068be10e77fe051841e8ffbd5 | /ctrModels/layers/DeepModule.py | f8b4c91f342e7e5b1f37d455255e621d828948ee | [] | no_license | gctian/RecommendRelative | a56a18eb36e6deeeb093b6356ebd9c3e16a3f3c5 | 31ec4530ae8bbb21fb2b88559b649458c2336df1 | refs/heads/master | 2022-04-09T12:53:19.902132 | 2020-03-05T03:46:01 | 2020-03-05T03:46:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,571 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class DNN(nn.Module):
    """Multi-layer perceptron tower ending in a single unscaled output.

    Args:
        deep_input_dim: dimensionality of the input feature vector.
        hidden_units: sequence of hidden-layer widths; must be non-empty.
        activation: element-wise activation applied after each hidden layer.
        l2_reg: L2 regularisation strength (stored only; not applied here).
        dnn_dropout: dropout probability applied after each activation.
        use_bn: insert BatchNorm1d between each linear layer and activation.
        init_std: std-dev for normal init of the hidden-layer weights.
        seed: stored for bookkeeping (not used inside this module).
        device: torch device the module is moved to.

    Raises:
        ValueError: if ``hidden_units`` is empty.
    """

    def __init__(self, deep_input_dim, hidden_units, activation=F.relu, l2_reg=0,
                 dnn_dropout=0, use_bn=False, init_std=0.0001, seed=2019, device='cpu'):
        super(DNN, self).__init__()
        self.activation = activation
        self.dropout = nn.Dropout(dnn_dropout)
        self.l2_reg = l2_reg
        self.use_bn = use_bn
        self.seed = seed
        if not hidden_units:
            raise ValueError("hidden_units is empty!")
        # Layer widths: input -> hidden_units[0] -> ... -> hidden_units[-1].
        # list() also accepts tuples/other sequences without mutating them.
        dims = [deep_input_dim] + list(hidden_units)
        self.linears = nn.ModuleList(
            [nn.Linear(dims[i], dims[i + 1]) for i in range(len(dims) - 1)])
        if self.use_bn:
            self.bn = nn.ModuleList(
                [nn.BatchNorm1d(dims[i + 1]) for i in range(len(dims) - 1)])
        # Initialise only the weights (biases keep their default init).
        for name, tensor in self.linears.named_parameters():
            if 'weight' in name:
                nn.init.normal_(tensor, mean=0, std=init_std)
        # Final projection to a single output, without bias.
        self.lst_linear = nn.Linear(dims[-1], 1, bias=False)
        self.to(device)

    def forward(self, X):
        """Map X of shape (batch, deep_input_dim) to a (batch, 1) output."""
        deep_input = X
        for i, linear in enumerate(self.linears):
            fc = linear(deep_input)
            if self.use_bn:
                fc = self.bn[i](fc)
            fc = self.activation(fc)
            fc = self.dropout(fc)
            deep_input = fc
        return self.lst_linear(deep_input)
| [
"yangjieyu@zju.edu.cn"
] | yangjieyu@zju.edu.cn |
f4b4d00b718585ed31cd29ce71b8f60e07093500 | 583b2b81e0e7ac3c910bc00204f877aa9c48bdae | /Analysis/HiggsTauTau/scripts/plotNLOReweight.py | a9ee60fd009b0ae60b3758209331920e89274949 | [] | no_license | danielwinterbottom/ICHiggsTauTau | 8c88a3322c8e362114a53c559634a7ecceb266d2 | 69f6b580ec1faa4e5b6ef931be880f939f409979 | refs/heads/master | 2023-09-01T17:49:18.865198 | 2023-02-07T13:14:15 | 2023-02-07T13:14:15 | 10,723,688 | 4 | 12 | null | 2023-09-07T08:41:44 | 2013-06-16T18:08:22 | Python | UTF-8 | Python | false | false | 2,612 | py | #!/usr/bin/env python
import ROOT
import CombineHarvester.CombineTools.plotting as plot
from UserCode.ICHiggsTauTau.uncertainties import ufloat
import UserCode.ICHiggsTauTau.MultiDraw
import argparse
def Rate(hist):
    """Integral of *hist* over all bins (including under/overflow),
    returned together with its statistical error as a ufloat."""
    total_err = ROOT.Double()
    total = hist.IntegralAndError(0, hist.GetNbinsX() + 1, total_err)
    return ufloat(total, total_err)
# Boilerplate
ROOT.PyConfig.IgnoreCommandLineOptions = True
ROOT.gROOT.SetBatch(ROOT.kTRUE)
plot.ModTDRStyle()
parser = argparse.ArgumentParser()
parser.add_argument('--var', default='mll(20,0,500)')
parser.add_argument('--tree', default='ntuple')
parser.add_argument('--selection', default='1')
parser.add_argument('--x-title', default='m_{ll}^{gen} (GeV)')
parser.add_argument('--ratio-range', default='0.69,1.31')
parser.add_argument('--title-right', default='')
parser.add_argument('--title-left', default='')
parser.add_argument('--output', default='mt_dist')
parser.add_argument('--file', default='')
# parser.add_argument('--norm', action='store_true')
# parser.add_argument('--type', default=0, type=int)
args = parser.parse_args()
files = [args.file]
# 'output/Mar05-postpreapp/SUSYGluGluToHToTauTau_M-500_mt_0']
ROOT.TH1.SetDefaultSumw2(True) # Why do I need to do this here?
chain = ROOT.TChain(args.tree)
for f in files:
    chain.Add('%s.root' % f)
# Draw the variable twice: unweighted, and with the NLO pT weight applied.
res = chain.MultiDraw([(args.var, '(%s)' % args.selection), (args.var, 'wt_nlo_pt*(%s)' % args.selection)])
canv = ROOT.TCanvas(args.output, args.output)
pads = plot.TwoPadSplit(0.30, 0.01, 0.01)
legend = ROOT.TLegend(0.50, 0.75, 0.93, 0.93, '', 'NBNDC')
plot.Set(res[0], LineColor=ROOT.kBlack, LineWidth=3)
plot.Set(res[1], LineColor=ROOT.kRed, LineWidth=3)
legend.AddEntry(res[0], 'Unweighted', 'L')
legend.AddEntry(res[1], 'Weighted', 'L')
res[0].Draw('HISTESAME')
res[1].Draw('HISTESAME')
plot.FixTopRange(pads[0], plot.GetPadYMax(pads[0]), 0.40)
# Relative acceptance = weighted / unweighted integral (incl. over/underflow),
# with error propagation handled by ufloat division inside Rate().
delta_acc = Rate(res[1]) / Rate(res[0])
legend.SetHeader('Rel. Acc.: %.3f #pm %.3f' % (delta_acc.n, delta_acc.s))
legend.Draw()
axis = plot.GetAxisHist(pads[0])
plot.Set(axis.GetXaxis(), Title=args.x_title)
plot.Set(axis.GetYaxis(), Title='a.u.')
rmin = float(args.ratio_range.split(',')[0])
rmax = float(args.ratio_range.split(',')[1])
# Lower pad: weighted / unweighted ratio.
pads[1].cd()
pads[1].SetGrid(0, 1)
ratio_hist = res[1].Clone()
ratio_hist.Divide(res[0])
ratio_hist.Draw('HISTESAME')
plot.SetupTwoPadSplitAsRatio(
    pads, plot.GetAxisHist(
        pads[0]), plot.GetAxisHist(pads[1]), 'Ratio', True, rmin, rmax)
plot.DrawTitle(pads[0], args.title_left, 1)
plot.DrawTitle(pads[0], args.title_right, 3)
res[0].Print()
canv.Print('.pdf')
canv.Print('.png')
"andrew.gilbert@cern.ch"
] | andrew.gilbert@cern.ch |
0c14dec5d07e86860f0dbe7a9e724f70efba9bb7 | e1dd6d9dccb822d472b7f4f9e8446dd9202eb5a1 | /sdk/test/test_io_k8s_apiextensions_apiserver_pkg_apis_apiextensions_v1beta1_webhook_client_config.py | 331dcbfa4b3324afd3605ebf8a82794a5fe8e3a8 | [] | no_license | swiftdiaries/argo_client | 8af73e8df6a28f9ea5f938b5894ab8b7825e4cc2 | b93758a22d890cb33cbd81934042cfc3c12169c7 | refs/heads/master | 2020-05-17T12:11:57.556216 | 2019-07-24T23:23:33 | 2019-07-24T23:23:33 | 183,701,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | # coding: utf-8
"""
Argo API Client
Generated python client for the Argo Workflows # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import argo.sdk
from models.io_k8s_apiextensions_apiserver_pkg_apis_apiextensions_v1beta1_webhook_client_config import IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1WebhookClientConfig # noqa: E501
from argo.sdk.rest import ApiException
class TestIoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1WebhookClientConfig(unittest.TestCase):
    """Unit-test stub for the generated WebhookClientConfig model."""

    def setUp(self):
        """No fixtures are required for this stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testIoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1WebhookClientConfig(self):
        """Placeholder construction test for the model."""
        # FIXME: construct object with mandatory attributes with example values
        # model = argo.sdk.models.io_k8s_apiextensions_apiserver_pkg_apis_apiextensions_v1beta1_webhook_client_config.IoK8sApiextensionsApiserverPkgApisApiextensionsV1beta1WebhookClientConfig()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"adhita94@gmail.com"
] | adhita94@gmail.com |
2204d2acfb965654d6c7a0734ed51fcb94833e6f | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /repos/Flask_Movie_Site-master/app/models.py | 9446323ade377e1f06599e4113013e1574ab1f92 | [] | no_license | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,858 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Version : 1.0
# @Author : Ricky.YangRui
from datetime import datetime
from app import db
"""
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
import pymysql
import os
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql+pymysql://root:root@localhost/movie"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
db = SQLAlchemy(app)
"""
# Member (site user)
class User(db.Model):
    __tablename__ = "user"
    id = db.Column(db.Integer, primary_key=True) # id
    name = db.Column(db.String(100), unique=True) # nickname
    pwd = db.Column(db.String(100)) # password hash (checked via check_pwd)
    email = db.Column(db.String(100), unique=True) # email address
    phone = db.Column(db.String(11), unique=True) # phone number
    info = db.Column(db.Text) # personal bio
    face = db.Column(db.String(255), unique=True) # avatar
    addtime = db.Column(db.DateTime, index=True, default=datetime.now) # registration time
    uuid = db.Column(db.String(255), unique=True) # unique identifier
    userlogs = db.relationship('Userlog', backref='user') # login-log FK relation
    comments = db.relationship('Comment', backref='user') # comment FK relation
    moviecols = db.relationship('Moviecol', backref='user') # favourite FK relation
    def __repr__(self):
        return "<User %r>" % self.name
    def check_pwd(self, pwd):
        # Compare a plaintext password against the stored hash.
        from werkzeug.security import check_password_hash
        return check_password_hash(self.pwd, pwd)
# Member login log
class Userlog(db.Model):
    __tablename__ = "userlog"
    id = db.Column(db.Integer, primary_key=True) # id
    user_id = db.Column(db.Integer, db.ForeignKey('user.id')) # owning member
    ip = db.Column(db.String(100)) # login IP
    addtime = db.Column(db.DateTime, index=True, default=datetime.now) # login time
    def __repr__(self):
        return "<Userlog %r>" % self.id
# Movie tag/category
class Tag(db.Model):
    __tablename__ = "tag"
    id = db.Column(db.Integer, primary_key=True) # id
    name = db.Column(db.String(100), unique=True) # tag name
    addtime = db.Column(db.DateTime, index=True, default=datetime.now) # creation time
    movies = db.relationship("Movie", backref='tag') # movie FK relation
    def __repr__(self):
        return "<Tag %r>" % self.name
# Movie
class Movie(db.Model):
    __tablename__ = "movie"
    id = db.Column(db.Integer, primary_key=True) # id
    title = db.Column(db.String(255), unique=True) # title
    url = db.Column(db.String(255), unique=True) # playback URL
    info = db.Column(db.Text) # synopsis
    logo = db.Column(db.String(255), unique=True) # cover image
    star = db.Column(db.SmallInteger) # star rating
    playnum = db.Column(db.BigInteger) # play count
    commentnum = db.Column(db.BigInteger) # comment count
    tag_id = db.Column(db.Integer, db.ForeignKey('tag.id')) # owning tag
    area = db.Column(db.String(255)) # release region
    release_time = db.Column(db.Date) # release date
    length = db.Column(db.String(100)) # runtime
    addtime = db.Column(db.DateTime, index=True, default=datetime.now) # creation time
    comments = db.relationship("Comment", backref='movie') # comment FK relation
    moviecols = db.relationship("Moviecol", backref='movie') # favourite FK relation
    def __repr__(self):
        return "<Movie %r>" % self.title
# Upcoming-release preview
class Preview(db.Model):
    __tablename__ = "preview"
    id = db.Column(db.Integer, primary_key=True) # id
    title = db.Column(db.String(255), unique=True) # title
    logo = db.Column(db.String(255), unique=True) # cover image
    addtime = db.Column(db.DateTime, index=True, default=datetime.now) # creation time
    def __repr__(self):
        return "<Preview %r>" % self.title
# Comment on a movie
class Comment(db.Model):
    __tablename__ = "comment"
    id = db.Column(db.Integer, primary_key=True) # id
    content = db.Column(db.Text) # comment body
    movie_id = db.Column(db.Integer, db.ForeignKey('movie.id')) # commented movie
    user_id = db.Column(db.Integer, db.ForeignKey('user.id')) # commenting user
    addtime = db.Column(db.DateTime, index=True, default=datetime.now) # creation time
    def __repr__(self):
        return "<Comment %r>" % self.id
# Movie favourite (user bookmarks a movie)
class Moviecol(db.Model):
    __tablename__ = "moviecol"
    id = db.Column(db.Integer, primary_key=True) # id
    movie_id = db.Column(db.Integer, db.ForeignKey('movie.id')) # favourited movie
    user_id = db.Column(db.Integer, db.ForeignKey('user.id')) # owning user
    addtime = db.Column(db.DateTime, index=True, default=datetime.now) # creation time
    def __repr__(self):
        return "<Moviecol %r>" % self.id
# Permission entry.
class Auth(db.Model):
    """A single named permission bound to a URL."""
    __tablename__ = "auth"
    id = db.Column(db.Integer, primary_key=True)  # primary key
    name = db.Column(db.String(100), unique=True)  # permission name
    url = db.Column(db.String(255), unique=True)  # protected URL
    addtime = db.Column(db.DateTime, index=True, default=datetime.now)  # creation time

    def __repr__(self):
        return "<Auth %r>" % self.name
# Administrator role.
class Role(db.Model):
    """Role grouping a list of permissions, assignable to admins."""
    __tablename__ = "role"
    id = db.Column(db.Integer, primary_key=True)  # primary key
    name = db.Column(db.String(100), unique=True)  # role name
    auths = db.Column(db.String(600))  # permission-id list (stored as string)
    addtime = db.Column(db.DateTime, index=True, default=datetime.now)  # creation time
    admins = db.relationship("Admin", backref='role')  # admins holding this role

    def __repr__(self):
        return "<Role %r>" % self.name
# Administrator account.
class Admin(db.Model):
    """Back-office administrator with role and audit-log links."""
    __tablename__ = "admin"
    id = db.Column(db.Integer, primary_key=True)  # primary key
    name = db.Column(db.String(100), unique=True)  # admin login name
    pwd = db.Column(db.String(100))  # hashed password
    is_super = db.Column(db.SmallInteger)  # 0 marks a super administrator
    role_id = db.Column(db.Integer, db.ForeignKey('role.id'))  # owning role
    addtime = db.Column(db.DateTime, index=True, default=datetime.now)  # creation time
    adminlogs = db.relationship("Adminlog", backref='admin')  # login-log link
    oplogs = db.relationship("Oplog", backref='admin')  # operation-log link

    def __repr__(self):
        return "<Admin %r>" % self.name

    def check_pwd(self, pwd):
        """Return True when ``pwd`` matches the stored password hash."""
        # Imported lazily so the models module does not require werkzeug
        # at import time.
        from werkzeug.security import check_password_hash
        return check_password_hash(self.pwd, pwd)
# Administrator login log.
class Adminlog(db.Model):
    """One login event for an administrator."""
    __tablename__ = "adminlog"
    id = db.Column(db.Integer, primary_key=True)  # primary key
    admin_id = db.Column(db.Integer, db.ForeignKey('admin.id'))  # owning admin
    ip = db.Column(db.String(100))  # login IP address
    addtime = db.Column(db.DateTime, index=True, default=datetime.now)  # login time

    def __repr__(self):
        return "<Adminlog %r>" % self.id
# Administrator operation log.
class Oplog(db.Model):
    """One administrative operation with its recorded reason."""
    __tablename__ = "oplog"
    id = db.Column(db.Integer, primary_key=True)  # primary key
    admin_id = db.Column(db.Integer, db.ForeignKey('admin.id'))  # owning admin
    ip = db.Column(db.String(100))  # client IP address
    reason = db.Column(db.String(600))  # reason for the operation
    addtime = db.Column(db.DateTime, index=True, default=datetime.now)  # operation time

    def __repr__(self):
        return "<Oplog %r>" % self.id
"""
# 测试添加数据
if __name__ == "__main__":
db.create_all()
role = Role(
name="超级管理",
auths="1"
)
db.session.add(role)
db.session.commit()
# 哈希加密密码
from werkzeug.security import generate_password_hash
admin = Admin(
name="ricky",
pwd=generate_password_hash("ricky"),
is_super=0,
role_id=1
)
db.session.add(admin)
db.session.commit()
"""
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
9c56f39998ac6357fc2138ba2549c677f6334ced | 8dc84ecbf6992eddd8225718d9841f7e9ffb87d0 | /cbv_project/core/migrations/0004_coments_number.py | ad0c3bc856cce1e12926c8bcbc27736cb37f3b40 | [] | no_license | projetosparalelos/DjangoClassBasedView | 45a50754f39b062f36153d4979ed8138bb19a2a6 | af78a70218c95ebcbcb0cbb62606c834a7d3d6a3 | refs/heads/master | 2020-04-24T21:38:53.998598 | 2016-10-27T18:01:06 | 2016-10-27T18:01:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-27 15:15
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add an integer ``number`` field (default 0) to the Coments model."""

    dependencies = [
        ('core', '0003_coments'),
    ]

    operations = [
        migrations.AddField(
            model_name='coments',
            name='number',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"gpzim98@gmail.com"
] | gpzim98@gmail.com |
733afbe6285c99270a4b2ba3703d0979eb69b31e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_95/2704.py | d7ee3aad02506313a6336b82a1f950228890cec3 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py |
import sys
def runme():
alphadict = {
"a": "y",
"b": "h",
"c": "e",
"d": "s",
"e": "o",
"f": "c",
"g": "v",
"h": "x",
"i": "d",
"j": "u",
"k": "i",
"l": "g",
"m": "l",
"n": "b",
"o": "k",
"p": "r",
"q": "z",
"r": "t",
"s": "n",
"t": "w",
"u": "j",
"v": "p",
"w": "f",
"x": "m",
"y": "a",
"z": "q",
" ": " ",
"\n": "",
}
numcases = int(sys.stdin.readline())
cases = []
for x in range(0, numcases):
cases.append(sys.stdin.readline())
#print "Number of Cases:", numcases
count = 1
for case in cases:
astr = ""
for char in case:
if char in alphadict:
astr = astr + alphadict[char]
else:
print "[" + char + "]",
print "Case #%d: %s" % (count, astr)
count += 1
if __name__ == "__main__":
runme();
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
688b35e871ec5b608a44870d49fe56197ecd1450 | 2c3da6e0bddf55d64d650040bbf286c47b31811a | /学习路线/1.python基础/day07/01-HMCards-主逻辑/hm_card_tool.py | def8acb6716889b1f31dd20f7320f7706d929894 | [
"MIT"
] | permissive | Bngzifei/PythonNotes | 76bd53db3033a9c51ab4bdd727842cd89607b584 | 01590e1b6c1bc0f04aa2d355fa2553c04cce27f2 | refs/heads/master | 2023-02-04T06:49:00.725463 | 2020-12-15T09:26:40 | 2020-12-15T09:26:40 | 155,154,662 | 1 | 2 | MIT | 2020-09-08T01:30:19 | 2018-10-29T05:02:48 | Python | UTF-8 | Python | false | false | 2,449 | py | """写每个功能的实现细节"""
# 定义一个全局列表变量,用来保存所有的名片字典
# Global registry holding every business-card dict.
card_list = []


def show_menu():
    """Print the application's main menu."""
    print('*' * 30)
    print('欢迎使用[名片管理系统]')
    print()
    print('1.新建名片')
    print('2.显示全部')
    print('3.查询名片')
    print('4.退出系统')
    print()
    print('*' * 30)
def new_card():
    """Prompt for a new card's fields and append it to ``card_list``."""
    print('功能:新建名片')

    name_str = input('请输入姓名:')
    phone_str = input('请输入电话:')
    qq_str = input('请输入qq:')
    email_str = input('请输入邮箱:')

    card_dict = {
        'name': name_str,
        'phone': phone_str,
        'qq': qq_str,
        'email': email_str,
    }
    # Appending mutates the module-level list in place, so no ``global``
    # declaration is needed.
    card_list.append(card_dict)

    print('添加%s的名片成功' % name_str)
def show_all():
    """Print every stored card, or a notice when the list is empty."""
    print('功能:显示全部')

    # Guard clause: nothing to display.
    if not card_list:
        print('提示:没有任何名片信息')
        return

    # Header row.
    print('姓名', '电话', 'qq', '邮箱', sep='\t\t')
    print('-' * 30)

    # One row per card, separated by rules.
    for card_dict in card_list:
        print(card_dict['name'], card_dict['phone'],
              card_dict['qq'], card_dict['email'], sep='\t\t')
        print('-' * 30)
| [
"bngzifei@gmail.com"
] | bngzifei@gmail.com |
76a53311b026d5a51cd95708db422e3dea2e4db0 | b3c939e013ecfdd68b02344ad2936ae53dd1a725 | /mnist/cnn/learner.py | 7bdd01fd0210128d1562a53586af8857c54904af | [] | no_license | TakakiNishio/chainer | 3cd9d2972d72c30d1d4fb979692de26539903556 | 55c2771a1a72dccd738e1350ab539f517083ba33 | refs/heads/master | 2020-12-24T11:07:36.788998 | 2017-07-02T19:43:45 | 2017-07-02T19:43:45 | 73,190,468 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,666 | py | #python library
import argparse
import time
#chainer libraty
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer import serializers
#python scripts
import network_structure as nn
import visualizer as v
#main
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
args = parser.parse_args()
print 'GPU: ' + format(args.gpu)
print '# Minibatch-size: ' + format(args.batchsize)
print '# epoch: ' + format(args.epoch)
print ''
# Set up a neural network to train
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
model = L.Classifier(nn.CNN())
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use() # Make a specified GPU current
model.to_gpu() # Copy the model to the GPU
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001))
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist(ndim=3)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,repeat=False, shuffle=False)
# Set up a trainer
updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
trainer.extend(extensions.dump_graph('main/loss'))
# Take a snapshot at each epoch
#trainer.extend(extensions.snapshot())
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy']))
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
start_time = time.time() #start time measurement
# Run the training
trainer.run()
execution_time = time.time() - start_time
print "execution time : " + str(execution_time)
print('saved the model')
serializers.save_npz('cnn.model', model)
print('saved the optimizer')
serializers.save_npz('cnn.state', optimizer)
v.loss_visualizer()
| [
"p104314t@mail.kyutech.jp"
] | p104314t@mail.kyutech.jp |
d7923aca77523b22ef837151459bea06217024fd | 8337bfdd69708f4bfbe345240dcccc7b8c7f5718 | /loglette/__main__.py | a4a12fdea7821dd124b6fb048a995945af6b419c | [
"MIT"
] | permissive | siku2/Loglette | 8f1c12ceb7f1009b5eab503ab7608b292be98739 | d69f99c3ead2bb24f2aa491a61a7f82cb9ca8095 | refs/heads/master | 2020-03-24T07:10:26.454200 | 2018-08-01T13:01:02 | 2018-08-01T13:01:02 | 142,555,185 | 1 | 0 | MIT | 2018-08-01T13:01:03 | 2018-07-27T09:12:51 | Python | UTF-8 | Python | false | false | 1,440 | py | #!/usr/bin/env python
from argparse import ArgumentParser, Namespace
from pathlib import Path
from loglette import Version, parse
def run(args: Namespace):
    """Parse the changelog file and print the requested entry."""
    changelogs = parse(args.file.read_text(), parser=args.parser)

    # Compare against the presets case-insensitively, but keep the
    # original string intact in case it is a version tag.
    version = args.version
    preset = version.lower()
    if preset == "flatten":
        changelog = changelogs.flatten()
    elif preset == "latest":
        changelog = changelogs.latest
    elif preset == "first":
        changelog = changelogs.first
    else:
        changelog = changelogs[Version.parse(version)]

    if changelog:
        print(changelog.format(args.format))
    else:
        print("Nothing to display")
def main(*args):
    """CLI entry point; parses ``args`` (falls back to sys.argv when empty)."""
    argv = args or None
    parser = ArgumentParser("loglette", description="A tool to make changelogs easy or something like that")
    parser.add_argument("file", type=Path, help="The file you'd like to parse")
    parser.add_argument("-p", "--parser", default=None, help="Specify the parser to use. By default Loglette tries to guess the correct parser")
    parser.add_argument("-f", "--format", default="markdown", help="output format")
    parser.add_argument("-v", "--version", default="first", help="How to parse multiple changelogs")
    run(parser.parse_args(argv))


if __name__ == "__main__":
    main()
| [
"siku2@outlook.com"
] | siku2@outlook.com |
b8ebd8d932265f942caaeb33d3d4a5b99dc057ca | 1ee8a4325a33d79e44b8d9339ed9cc98272653c0 | /PyNeuronToolbox/record.py | 3bcedd9185c045fe7e626eb9fc22fd5c59d98332 | [
"MIT"
] | permissive | JustasB/PyNeuron-Toolbox | 7320f5af547674cf5f3a9adb08ffaa16f0a6ede7 | 3e257f298644374ee78a0b9f401f6c468007493c | refs/heads/master | 2021-01-15T09:47:29.539176 | 2016-08-30T00:49:21 | 2016-08-30T00:49:21 | 66,967,196 | 1 | 0 | null | 2016-08-30T18:33:21 | 2016-08-30T18:33:20 | null | UTF-8 | Python | false | false | 2,301 | py | import numpy as np
from morphology import allsec_preorder
def ez_record(h, var='v', sections=None, order=None,
              targ_names=None, cust_labels=None):
    """
    Records state variables across segments

    Args:
        h = hocObject to interface with neuron
        var = string specifying state variable to be recorded.
              Possible values are:
                  'v' (membrane potential)
                  'cai' (Ca concentration)
        sections = list of h.Section() objects to be recorded
        order = 'pre' to traverse sections in pre-order (only used when
                ``sections`` is None)
        targ_names = list of section names to be recorded; alternative
                     to passing h.Section() objects directly through the
                     "sections" argument above.
        cust_labels = list of custom section labels (one per section)

    Returns:
        data = list of h.Vector() objects recording the chosen variable
        labels = list of labels for each trace
    """
    if sections is None:
        if order == 'pre':
            sections = allsec_preorder(h)
        else:
            sections = list(h.allsec())
    if targ_names is not None:
        # Keep only the named sections, preserving their order.
        sections = [sec for sec in sections if sec.name() in targ_names]
    data, labels = [], []
    for i, sec in enumerate(sections):
        # Interior segment centres; endpoints (0 and 1) are excluded.
        positions = np.linspace(0, 1, sec.nseg + 2)
        for position in positions[1:-1]:
            data.append(h.Vector())
            # BUG FIX: the original compared strings with ``is`` which
            # relies on CPython string interning; ``==`` is the correct
            # value comparison.
            if var == 'v':
                data[-1].record(sec(position)._ref_v)
            elif var == 'cai':
                data[-1].record(sec(position)._ref_cai)
            # Label is "<section-or-custom-name>_<position>".
            if cust_labels is None:
                lab = sec.name() + '_' + str(round(position, 5))
            else:
                lab = cust_labels[i] + '_' + str(round(position, 5))
            labels.append(lab)
    return data, labels
def ez_convert(data):
    """
    Convert ``data``, a list of h.Vector() objects filled by ez_record,
    into a 2-D numpy array with one column per vector.
    """
    n_samples = len(data[0])
    converted = np.empty((n_samples, len(data)))
    for col, vec in enumerate(data):
        converted[:, col] = vec.to_python()
    return converted
| [
"alex.h.willia@gmail.com"
] | alex.h.willia@gmail.com |
9f6aebcfee1047fc3157d48b5a8757dbc319411c | 7eaae831d5f854df4a2614f7479ad0efb24bb48d | /matplotlib.py | 784e9428a42161e53cd48a54c093117dbb95421e | [] | no_license | tt-n-walters/python-datascience | d49f220adced5222b25a00e8987a8128595b2505 | a893c30159391dde264d4e0980b9324b95253c00 | refs/heads/master | 2022-11-24T16:18:50.532221 | 2020-07-24T08:35:51 | 2020-07-24T08:35:51 | 280,103,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | import sys
sys.path.append(sys.path.pop(0))
from matplotlib import pyplot
import numpy as np
import math
import random

# Scatter demo: x on a fixed grid, random heights, marker sizes and colours.
x_coords = np.arange(-10, 11, 0.2)
y_coords = [random.random() * 10 for _x in x_coords]
sizes = [random.choice([10, 20, 50, 100, 200]) for _x in x_coords]

all_colours = ["red", "orange", "blue", "green", "cyan", "purple"]
colours = [random.choice(all_colours) for _x in x_coords]

pyplot.style.use("ggplot")
pyplot.scatter(x_coords, y_coords, s=sizes, c=colours, alpha=0.5)
pyplot.show()

# To save instead of displaying:
# pyplot.savefig("output.png")
| [
"nico.walters@techtalents.es"
] | nico.walters@techtalents.es |
9e8b24e25c7675ef8e00b1d23b1f772f3060ac13 | 04f89b0f89b9f0edff718a8dc915261f7cb28d93 | /hermes1d/hermes1d/fekete/hydrogen.py | ce21b53c2bf991b4d83373a93c31d6d628d40def | [] | no_license | MelanieBittl/hermes | 805025c609808c5a1a440f4bdd61114f29641ce2 | e037aa21366287845c0deb130941b902213805dd | refs/heads/master | 2021-01-17T22:37:02.160138 | 2011-09-23T16:45:01 | 2011-09-23T16:45:01 | 1,853,302 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | """
This module will eventually end up in SymPy itself.
For now we'll keep it here, until we polish the interface and get the job done.
"""
from sympy import var, factorial, sqrt, exp, S
def laguerre_poly(alpha, n, x):
    """Return the generalized Laguerre polynomial L_n^alpha evaluated at x,
    built term by term from its closed-form series."""
    total = 0
    for m in range(n + 1):
        # coeff = (alpha + m + 1)(alpha + m + 2)...(alpha + n)
        coeff = 1
        for i in range(m + 1, n + 1):
            coeff *= alpha + i
        total += (-1) ** m * coeff * x ** m / (factorial(m) * factorial(n - m))
    return total
def R_nl(n, l, r, a):
    """
    Return the radial Hydrogen wavefunction R_{nl} as a function of "r".

    "a" is the Bohr radius; the result is a symbolic expression.
    """
    norm = S(1) / n * sqrt(factorial(n - l - 1) / factorial(n + l) / a ** 3)
    rho = 2 * r / n / a
    return norm * rho ** l * laguerre_poly(2 * l + 1, n - l - 1, rho) * exp(-r / n / a)
def R_nl_numeric(n, l, x):
    """
    Return the exact floating-point value of R_nl (Bohr radius a=1)
    evaluated at the (float) point ``x``.
    """
    return float(R_nl(n, l, x, 1).evalf())
if __name__ == "__main__":
var("alpha z")
print laguerre_poly(alpha, 3, z)
var("r")
for n in range(1, 7):
for l in range(0, n):
print "(%d, %d): %s" % (n, l, R_nl(n, l, r, 1))
print
print R_nl(4, 0, S(1)/2, 1).evalf()
print R_nl_numeric(4, 0, 0.5)
#from pylab import plot, show
#from numpy import arange
#x = arange(0, 20, 0.1)
#y = [R_nl(4, 0, r, 1) for r in x]
#plot(x, y)
#show()
| [
"ondrej@certik.cz"
] | ondrej@certik.cz |
76e7d778ee0b7b5498082e56a66e5331a47be2f6 | 4e563ac1ab395de8e44c230b0573ec57fe2fabb9 | /payment/views.py | 0238ddfd1834f5b63945e34b249d647dab3f0c51 | [
"MIT"
] | permissive | shabnam1374/BiaBegard | 59110e0758abc5c85e88459d35a74531f71d9ac8 | 46a38f04e6a5d29f31ebb07011d580e31c41ac48 | refs/heads/main | 2023-08-27T20:16:39.809034 | 2021-10-28T15:13:46 | 2021-10-28T15:13:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,713 | py | import os
import uuid
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from idpay.api import IDPayAPI
from .models import Invoice
from financial.models import Carts
def payment_init():
    """Build an IDPayAPI client from environment configuration."""
    api_key = os.environ.get('IDPAY_API_KEY')
    base_url = os.environ.get('BASE_URL')
    # NOTE(review): bool() on any non-empty string (even "false") is
    # True — confirm IDPAY_SANDBOX is only set when sandbox is wanted.
    sandbox = os.environ.get('IDPAY_SANDBOX')
    return IDPayAPI(api_key, base_url, bool(sandbox))
def payment_start(request):
    """Create an Invoice for the user's selected cart items and redirect
    to the IDPay gateway; render an error page on failure."""
    if request.method == 'POST':
        order_id = uuid.uuid1()
        amount = request.POST.get('amount')
        payer = {
            'name': request.user.username,
            'mail': request.user.email,
        }

        # Ids of the pending, selected items in the user's cart.
        cart = Carts.objects.filter(user_id=request.user.id).first()
        selected = cart.cart_items.filter(status='pending', is_selected=True).values('id')
        item_ids = [item['id'] for item in selected]

        record = Invoice(user_id=request.user.id, order_id=order_id, amount=int(amount))
        record.save()
        record.cart_items.add(*item_ids)
        record.save()

        idpay_payment = payment_init()
        result = idpay_payment.payment(str(order_id), amount, 'payment-return/', payer)
        if 'id' in result:
            # Gateway accepted: mark pending and send the user to pay.
            record.status = 1
            record.payment_id = result['id']
            record.save()
            return redirect(result['link'])
        txt = result['message']
    else:
        txt = "Bad Request"
    return render(request, 'payment/error.html', {'txt': txt})
@csrf_exempt
def payment_return(request):
    """IDPay callback endpoint: verify the payment and update the invoice.

    On a verified payment (status 100) the invoice's cart items are marked
    paid and product inventory is decremented. Always renders
    payment/error.html with a status message.
    """
    if request.method == 'POST':
        pid = request.POST.get('id')
        status = request.POST.get('status')
        pidtrack = request.POST.get('track_id')
        order_id = request.POST.get('order_id')
        amount = request.POST.get('amount')
        card = request.POST.get('card_no')
        date = request.POST.get('date')
        if Invoice.objects.filter(order_id=order_id, payment_id=pid,
                                  amount=amount, status=1).count() == 1:
            idpay_payment = payment_init()
            payment = Invoice.objects.get(payment_id=pid, amount=amount)
            payment.status = status
            payment.date = str(date)
            payment.card_number = card
            payment.idpay_track_id = pidtrack
            payment.save()
            # Status 10 means "paid, awaiting verification" — verify it.
            if str(status) == '10':
                result = idpay_payment.verify(pid, payment.order_id)
                if 'status' in result:
                    payment.status = result['status']
                    payment.bank_track_id = result['payment']['track_id']
                    payment.save()
                    if result['status'] == 100:
                        # Verified: mark items paid and reduce stock.
                        # (Leftover debug prints removed.)
                        items = payment.cart_items.all()
                        items.update(status="paid")
                        for item in items:
                            item.product.inventory -= item.qty
                            item.product.save()
                    return render(request, 'payment/error.html', {'txt': result['message']})
                else:
                    txt = result['message']
            else:
                txt = "Error Code : " + str(status) + " | " + "Description : " + idpay_payment.get_status(status)
        else:
            txt = "Order Not Found"
    else:
        txt = "Bad Request"
    return render(request, 'payment/error.html', {'txt': txt})
| [
"poya_kob@live.com"
] | poya_kob@live.com |
25a55a32a40fb6f32add641c422491b6797c5d3a | bfddc2da8c2e9d9467ff7fbf31ace9d0092dbda6 | /registered_agent/api/v1/viewsets.py | 32818679a6ce12f7a4b24e17f529a5d9b5e1a8dd | [] | no_license | crowdbotics-apps/real-estate-17525 | cf9af364fb9ca884e2ffed97db72c56beb302939 | d687815aed2e4bcfd35d63d33bf58ee0c66c3fbe | refs/heads/master | 2023-05-13T12:18:47.250650 | 2020-05-29T01:48:36 | 2020-05-29T01:48:36 | 267,735,595 | 0 | 0 | null | 2021-06-10T11:03:44 | 2020-05-29T01:22:55 | Python | UTF-8 | Python | false | false | 462 | py | from rest_framework import authentication
from registered_agent.models import Registered_agent
from .serializers import Registered_agentSerializer
from rest_framework import viewsets
class Registered_agentViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for Registered_agent records."""

    serializer_class = Registered_agentSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Registered_agent.objects.all()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
b3f8345e4853c487fb969098b3cbc12b0db9e9aa | 8ecd1d9d1760acbf1f9b999363a5d150460e1c2a | /E8-22.py | 3071690c7147836a645396df8089c09de57d333a | [] | no_license | Gary2018X/data_processing_primer | 0910c7c37af7ed03546520303c7d1660c62b0254 | b0d2cb16b4fcaed08f01accb4695232a33131d28 | refs/heads/master | 2023-01-14T17:39:48.856606 | 2020-11-17T03:21:32 | 2020-11-17T03:21:32 | 298,432,953 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # -*- coding: utf-8 -*-
# author:Gary
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# Typical chart: 3-D scatter plot of normally distributed points.
np.random.seed(0)
X = np.random.randn(1000)
Y = np.random.randn(1000)
Z = np.random.randn(1000)

# Create the Figure object and a single 3-D subplot.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(1, 1, 1, projection="3d")

# Flatten X/Y/Z to one-dimensional arrays.
x = np.ravel(X)
y = np.ravel(Y)
z = np.ravel(Z)

# Draw the 3-D scatter plot.
ax.scatter3D(x, y, z)
plt.show()
| [
"56428019+Gary2018X@users.noreply.github.com"
] | 56428019+Gary2018X@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.