blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
288
content_id
stringlengths
40
40
detected_licenses
listlengths
0
112
license_type
stringclasses
2 values
repo_name
stringlengths
5
115
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
684 values
visit_date
timestamp[us]date
2015-08-06 10:31:46
2023-09-06 10:44:38
revision_date
timestamp[us]date
1970-01-01 02:38:32
2037-05-03 13:00:00
committer_date
timestamp[us]date
1970-01-01 02:38:32
2023-09-06 01:08:06
github_id
int64
4.92k
681M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
22 values
gha_event_created_at
timestamp[us]date
2012-06-04 01:52:49
2023-09-14 21:59:50
gha_created_at
timestamp[us]date
2008-05-22 07:58:19
2023-08-21 12:35:19
gha_language
stringclasses
147 values
src_encoding
stringclasses
25 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
128
12.7k
extension
stringclasses
142 values
content
stringlengths
128
8.19k
authors
listlengths
1
1
author_id
stringlengths
1
132
3c01565ab29a50b5e2ab3b2e0ebc8671fa00fa1d
84fbca1dd86aa1d7d65afd65bc65a71420513fb8
/queriesapp/migrations/0002_auto_20200313_1226.py
4fc15376311aefa9eef66940a093094bb9bb41e9
[]
no_license
corri-golden/queries
40a7415868149014dcf9d20dedd17801b4552f0b
fc7a4f26f185e3a6018cd257a12abe74019db531
refs/heads/master
2021-09-26T16:47:14.470875
2021-03-20T04:23:57
2021-03-20T04:23:57
245,893,279
0
0
null
null
null
null
UTF-8
Python
false
false
927
py
# Generated by Django 3.0.4 on 2020-03-13 12:26
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Introduce the Status model and attach a status FK to Query."""

    dependencies = [
        ('queriesapp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status_name', models.CharField(max_length=50)),
            ],
            options={
                'verbose_name': 'status',
                'verbose_name_plural': 'statuses',
            },
        ),
        migrations.AddField(
            model_name='query',
            name='status',
            # default=1 backfills existing rows; preserve_default=False drops
            # the default again once the migration has run.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='queriesapp.Status'),
            preserve_default=False,
        ),
    ]
[ "corri.golden@gmail.com" ]
corri.golden@gmail.com
a65d310a45773ea11f0818aa6af806ea9d490511
3de1e940512394a6aebe499a9ce07a33a427ea7b
/tools/stats/monitor.py
972d0dbea038bda843c1db9f7728bd4e8bbd1fa5
[ "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "BSL-1.0", "Apache-2.0", "BSD-2-Clause" ]
permissive
shinh/pytorch
3719d1a6129db16e932452bec9b6646edf153226
c6b7c33885eeff9dc125f87c7134772d59d0ba21
refs/heads/master
2022-10-21T22:03:14.775982
2022-10-10T05:47:11
2022-10-10T05:47:11
220,897,452
1
0
NOASSERTION
2019-11-11T03:56:40
2019-11-11T03:56:39
null
UTF-8
Python
false
false
2,871
py
#!/usr/bin/env python3
"""Sample CPU/GPU usage of running python test processes once per second.

Emits one JSON object per line to stdout until SIGTERM is received.
"""
import datetime
import json
import signal
import time
from typing import Any, Dict, List

import psutil  # type: ignore[import]
import pynvml  # type: ignore[import]


def get_processes_running_python_tests() -> List[Any]:
    """Return psutil handles for every live python process that has a cmdline."""
    python_processes = []
    for process in psutil.process_iter():
        try:
            if "python" in process.name() and process.cmdline():
                python_processes.append(process)
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # access denied or the process died between iteration and inspection
            pass
    return python_processes


def get_per_process_cpu_info() -> List[Dict[str, Any]]:
    """Collect per-process CPU and memory statistics for python test processes."""
    processes = get_processes_running_python_tests()
    per_process_info = []
    for p in processes:
        # memory_full_info() is comparatively expensive; call it once per process.
        mem_full = p.memory_full_info()
        info = {
            "pid": p.pid,
            "cmd": " ".join(p.cmdline()),
            "cpu_percent": p.cpu_percent(),
            "rss_memory": p.memory_info().rss,
            "uss_memory": mem_full.uss,
        }
        # BUG FIX: the original wrote `"pss" in p.memory_full_info()`, which
        # tests membership among the namedtuple's *values*, not its fields.
        # Use hasattr to detect the Linux-only `pss` field.
        if hasattr(mem_full, "pss"):
            info["pss_memory"] = mem_full.pss
        per_process_info.append(info)
    return per_process_info


def get_per_process_gpu_info(handle: Any) -> List[Dict[str, Any]]:
    """Collect per-process GPU memory usage for the given NVML device handle."""
    processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
    return [{"pid": p.pid, "gpu_memory": p.usedGpuMemory} for p in processes]


if __name__ == "__main__":
    handle = None
    try:
        pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(0)
    except pynvml.NVMLError:
        # no NVML available, probably because this is not a CUDA machine
        pass

    kill_now = False

    def exit_gracefully(*args: Any) -> None:
        global kill_now
        kill_now = True

    signal.signal(signal.SIGTERM, exit_gracefully)

    while not kill_now:
        try:
            stats = {
                "time": datetime.datetime.utcnow().isoformat("T") + "Z",
                "total_cpu_percent": psutil.cpu_percent(),
                "per_process_cpu_info": get_per_process_cpu_info(),
            }
            if handle is not None:
                stats["per_process_gpu_info"] = get_per_process_gpu_info(handle)
                # https://docs.nvidia.com/deploy/nvml-api/structnvmlUtilization__t.html
                gpu_utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
                stats["total_gpu_utilization"] = gpu_utilization.gpu
                stats["total_gpu_mem_utilization"] = gpu_utilization.memory
        except Exception as e:
            stats = {
                "time": datetime.datetime.utcnow().isoformat("T") + "Z",
                "error": str(e),
            }
        finally:
            print(json.dumps(stats))
        time.sleep(1)
[ "pytorchmergebot@users.noreply.github.com" ]
pytorchmergebot@users.noreply.github.com
fc15825c91764aeef1e3c0d91ecb2bc9384037cb
d5fc28473e41a11e3ee793362e80f2db83b1d386
/tests/unit/cli.py
1f4810cb82091b028bc3a6c0ceb35d0071fb39b7
[ "Apache-2.0" ]
permissive
starbops/haas
71335ea29dbf06579381c7745176ee9f7c86d423
751d4fc27732ac7d660886b7c47948300c606460
refs/heads/master
2020-05-29T11:37:40.484664
2016-03-17T03:04:11
2016-03-17T03:04:11
53,773,276
0
0
null
2016-03-13T07:41:34
2016-03-13T07:41:34
null
UTF-8
Python
false
false
1,877
py
import os
import signal
import tempfile
from subprocess import Popen, check_call
from time import sleep

import pytest

config = """
[headnode]
base_imgs = base-headnode, img1, img2, img3, img4
[database]
uri = sqlite:///haas.db
[extensions]
haas.ext.auth.null =
haas.ext.network_allocators.null =
"""


@pytest.fixture(autouse=True)
def make_config(request):
    """Run every test inside a scratch directory holding a fresh haas.cfg."""
    tmpdir = tempfile.mkdtemp()
    old_cwd = os.getcwd()
    os.chdir(tmpdir)
    with open('haas.cfg', 'w') as cfg_file:
        cfg_file.write(config)

    def cleanup():
        os.remove('haas.cfg')
        os.remove('haas.db')
        os.chdir(old_cwd)
        os.rmdir(tmpdir)

    request.addfinalizer(cleanup)


def test_init_db():
    check_call(['haas', 'init_db'])


def runs_for_seconds(cmd, seconds=1):
    """Test if the command ``cmd`` runs for at least ``seconds`` seconds.

    ``cmd`` is a list containing the name of a command and its arguments.
    ``seconds`` is a number of seconds (by default 1).

    ``run_for_seconds`` will execute ``cmd``, wait for ``seconds`` seconds,
    send SIGTERM to the process, and then wait() for it. If the exit status
    indicates that it stopped for any reason other than SIGTERM,
    ``run_for_seconds`` returns False, otherwise it returns True.

    This is useful to check that a server process does not immediately die
    on startup, though it's a bit of a hack --- checking rigorously would
    require extra knowledge of the workings of that process (hooray for the
    halting problem).
    """
    proc = Popen(cmd)
    sleep(seconds)
    proc.terminate()
    # A negative wait() status is the signal number that killed the child.
    return proc.wait() == -signal.SIGTERM


def test_serve():
    check_call(['haas', 'init_db'])
    assert runs_for_seconds(['haas', 'serve', '5000'], seconds=1)


def test_serve_networks():
    check_call(['haas', 'init_db'])
    assert runs_for_seconds(['haas', 'serve_networks'], seconds=1)
[ "ian@zenhack.net" ]
ian@zenhack.net
dd1c933da5c0589e147cfac927c95849c3d02401
07c75f8717683b9c84864c446a460681150fb6a9
/back_cursor/S-scrapy/zhilian/zhilian/pipelines.py
ce305e87f0e027bad9b1bb740e708506227f2072
[]
no_license
laomu/py_1709
987d9307d9025001bd4386381899eb3778f9ccd6
80630e6ac3ed348a2a6445e90754bb6198cfe65a
refs/heads/master
2021-05-11T09:56:45.382526
2018-01-19T07:08:00
2018-01-19T07:08:00
118,088,974
0
0
null
null
null
null
UTF-8
Python
false
false
1,236
py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

# sqlalchemy supplies the engine/session machinery
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

# pymysql stands in for MySQLdb as sqlalchemy's low-level MySQL driver
import pymysql
pymysql.install_as_MySQLdb()


class ZhilianPipeline(object):
    """Item pipeline for the Zhilian recruitment spider.

    Validates scraped items and persists them to MySQL.
    """

    def __init__(self):
        # Open the database engine and obtain a connection session object.
        engine = create_engine("mysql://root:@localhost/py1709_spider?charset=utf8")
        Session = sessionmaker(bind=engine)
        self.session = Session()

    def process_item(self, item, spider):
        # SECURITY FIX: the original interpolated scraped values straight into
        # the SQL string with %-formatting, which is injectable; use bound
        # parameters instead.
        zl_sql = text(
            "insert into jobs(job_name, company, salary) "
            "values(:job_name, :company, :salary)"
        )
        self.session.execute(
            zl_sql,
            {
                "job_name": item["job_name"],
                "company": item["company"],
                "salary": item["salary"],
            },
        )
        return item

    def close_spider(self, spider):
        # Commit pending rows and close the database session.
        self.session.commit()
        self.session.close()
[ "1007821300@qq.com" ]
1007821300@qq.com
7f5466358e1afe8affcb50531035a634f09d47f2
3e3bf98840d133e56f0d0eb16ba85678ddd6ca45
/.history/iss_20200102114952.py
b10444b89fa1b6c3096e6c74cc748aa78096e84a
[]
no_license
Imraj423/backend-iss-location-assessment
a05d3cc229a5fc4857483ae466348c1f8c23c234
b0565c089a445ccffcb8d0aab3c0be3bb0c1d5b8
refs/heads/master
2020-12-03T17:04:58.512124
2020-06-24T16:02:02
2020-06-24T16:02:02
231,400,854
0
0
null
2020-06-24T16:02:04
2020-01-02T14:43:44
null
UTF-8
Python
false
false
920
py
import requests
import time
import turtle

# Show who is currently in space first; in the original this code sat after
# an infinite loop and was unreachable.
r = requests.get('http://api.open-notify.org/astros.json')
r.raise_for_status()
print(r.text)

screen = turtle.Screen()
image = "iss.gif"
screen.addshape(image)
raf = turtle.Turtle()
raf.shape(image)
raf.setheading(90)
raf.penup()

screen.bgpic("map.gif")
screen.screensize(4000, 3000)
screen.setup(width=800, height=600, startx=0, starty=0)
# NOTE: the original called screen.exitonclick() here, which enters the Tk
# mainloop and made the tracking loop below unreachable; it has been removed.

while True:
    s = requests.get('http://api.open-notify.org/iss-now.json')
    # BUG FIX: raise_for_status was referenced but never called.
    s.raise_for_status()
    # BUG FIX: requests responses have no .read(); the original also used
    # json.loads without importing json.  .json() does both jobs.
    result = s.json()
    print(s.text)

    # Extract the required information.
    location = result["iss_position"]
    # BUG FIX: the API returns coordinates as strings; goto needs numbers.
    lat = float(location["latitude"])
    lon = float(location["longitude"])

    # Output information on screen.
    print("\nLatitude: " + str(lat))
    print("Longitude: " + str(lon))

    # Plot the ISS on the map.
    raf.goto(lon, lat)

    # Refresh position every 5 seconds.
    time.sleep(5)
[ "dahqniss@gmail.com" ]
dahqniss@gmail.com
a802808f5187909756b07ccfd2d5e6956da34179
866b7169c069c153bacfa7961dce8909aa391faa
/blog/admin.py
e7b63a18af39c397f3075c4067021ab8e245f26b
[]
no_license
nikolasribeiro/pagina-jovenes-40
d2502b5d1569000eb7dc059f62b66978ea8642ab
5618cbc1ac03f6dd47eba6360dbdbb2ead70f268
refs/heads/main
2023-03-08T18:45:08.323432
2021-02-27T22:36:47
2021-02-27T22:36:47
336,833,170
0
0
null
null
null
null
UTF-8
Python
false
false
984
py
from django.contrib import admin
from .models import Blog, Categoria

# Import/Export support for saving data
from import_export import resources
from import_export.admin import ImportExportModelAdmin


# Import/Export resource classes
class BlogResource(resources.ModelResource):
    class Meta:
        model = Blog


class BlogAdmin(ImportExportModelAdmin, admin.ModelAdmin):
    list_display = (
        'titulo_blog',
        'subtitulo_blog',
        'imagen_blog',
        'slug',
        'descripcion_breve',
        'contenido_blog',
        'autor',
        'fecha_publicacion',
    )
    resource_class = BlogResource


class CategoriaResource(resources.ModelResource):
    class Meta:
        # BUG FIX: this resource previously pointed at Blog, so category
        # import/export went through the wrong model.
        model = Categoria


class CategoriaAdmin(ImportExportModelAdmin, admin.ModelAdmin):
    list_display = (
        'nombre_categoria',
    )
    resource_class = CategoriaResource


# Register your models here.
admin.site.register(Blog, BlogAdmin)
admin.site.register(Categoria, CategoriaAdmin)
[ "nikolasribeiro2@outlook.com" ]
nikolasribeiro2@outlook.com
cf43f252196aa5e91466d9092828816745be5ca3
3851d5eafcc5fd240a06a7d95a925518412cafa0
/Django_Code/gs25/gs25/asgi.py
cb53a37a73a74e22db638ccc220b4e981d1b6bca
[]
no_license
Ikshansaleem/DjangoandRest
c0fafaecde13570ffd1d5f08019e04e1212cc2f3
0ccc620ca609b4ab99a9efa650b5893ba65de3c5
refs/heads/master
2023-01-31T04:37:57.746016
2020-12-10T06:27:24
2020-12-10T06:27:24
320,180,735
0
0
null
null
null
null
UTF-8
Python
false
false
401
py
""" ASGI config for gs25 project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs25.settings') application = get_asgi_application()
[ "ikshan3108@gmail.com" ]
ikshan3108@gmail.com
c67b2260e2dab5a6ed7a5447cb5d23fbae2047c7
07131e91dcf2529e9c7058f8a8f239d419c8f7e0
/1447.simplified-fractions.py
240c2196f222b390151a2ea343df329835207f3d
[]
no_license
Code-Wen/LeetCode_Notes
5194c5c5306cb9f4a0fac85e06fefe6c02d65d44
791fc1b43beef89d668788de6d12f5c643431b8f
refs/heads/master
2021-07-04T14:41:00.830723
2020-09-27T16:31:22
2020-09-27T16:31:22
178,456,323
1
0
null
null
null
null
UTF-8
Python
false
false
1,492
py
#
# @lc app=leetcode id=1447 lang=python3
#
# [1447] Simplified Fractions
#
# https://leetcode.com/problems/simplified-fractions/description/
#
# Given an integer n, return a list of all simplified fractions between 0 and 1
# (exclusive) such that the denominator is less-than-or-equal-to n.  The
# fractions can be in any order.
#
# Constraints: 1 <= n <= 100
#

from math import gcd
from typing import List


# @lc code=start
class Solution:
    def simplifiedFractions(self, n: int) -> List[str]:
        """Return every fully-reduced fraction p/q with 0 < p < q <= n.

        BUG FIX: the original annotated the return as ``List[str]`` without
        importing ``List`` from typing, raising NameError when the class
        body executes.  The hand-rolled Euclidean gcd is also replaced by
        ``math.gcd``.
        """
        res = []
        for denom in range(2, n + 1):
            for numerator in range(1, denom):
                # A fraction is simplified iff numerator and denominator
                # are coprime.
                if gcd(denom, numerator) == 1:
                    res.append(str(numerator) + "/" + str(denom))
        return res
# @lc code=end
[ "chenxu.wen.math@gmail.com" ]
chenxu.wen.math@gmail.com
6924ccab05426697554fea2c956596a548469849
4d2475135f5fc9cea73572b16f59bfdc7232e407
/prob130_surrounded_regions.py
cbb05c421a1740056d03c472983e5335f0f064ce
[]
no_license
Hu-Wenchao/leetcode
5fa0ae474aadaba372756d234bc5ec397c8dba50
31b2b4dc1e5c3b1c53b333fe30b98ed04b0bdacc
refs/heads/master
2021-06-24T04:57:45.340001
2017-06-17T02:33:09
2017-06-17T02:33:09
45,328,724
0
0
null
null
null
null
UTF-8
Python
false
false
1,128
py
""" Given a 2D board containing 'X' and 'O', capture all regions surrounded by 'X'. A region is captured by flipping all 'O's into 'X's in that surrounded region. For example, X X X X X O O X X X O X X O X X After running your function, the board should be: X X X X X X X X X X X X X O X X """ class Solution(object): def solve(self, board): """ :type board: List[List[str]] :rtype: void Do not return anything, modify board in-place instead. """ if not board: return m, n = len(board), len(board[0]) stack = [(i, 0) for i in range(m)] + [(i, n-1) for i in range(m)] stack += [(0, j) for j in range(1, n-1)] + \ [(m-1, j) for j in range(1, n-1)] while stack: i, j = stack.pop() if 0 <= i < m and 0 <= j < n and board[i][j] == 'O': board[i][j] = 'S' stack.append((i-1, j)) stack.append((i+1, j)) stack.append((i, j+1)) stack.append((i, j-1)) board[:] = [['XO'[node=='S'] for node in row] for row in board]
[ "huwcbill@gmail.com" ]
huwcbill@gmail.com
73e6a2ec930cc79de0a569f1fe7e1d0881098d19
4a52362d2a46c747af74e3c321b1bd9d73bd0116
/virtual/bin/static
fcf01443e92188fc7d4641feefe749e4664b5659
[ "MIT" ]
permissive
TonyKioko/PichaZa
1420242b48c204637a166778084aaa4cb3776938
8e2e8f3d002a624fe64ce089e4581265080975d6
refs/heads/master
2020-03-30T01:31:53.034302
2018-10-03T07:35:30
2018-10-03T07:35:30
150,580,180
0
0
null
null
null
null
UTF-8
Python
false
false
244
#!/home/tony/Desktop/PichaZa/virtual/bin/python
# -*- coding: utf-8 -*-
"""Console-script shim: normalise argv[0] and dispatch to ``static.command``."""
import re
import sys

from static import command

if __name__ == '__main__':
    # Drop a setuptools wrapper suffix ("-script.py(w)" or ".exe") from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(command())
[ "tonykioko384@gmail.com" ]
tonykioko384@gmail.com
a754658047ec2d9bd7b8997aa6df168e5080f297
0fc2b99fd8414dbce5f1f6057b9b800c968d5d05
/tests/pysge/test_pysge.py
a54c3b00256e1b2b635b0b89ce3728218a840e3b
[ "MIT" ]
permissive
widdowquinn/lpbio
9df898cb9580f62da1f66d5736cbf7a984633561
8b95642396d05a56c1c54389e3de6d88d7cbffb5
refs/heads/master
2020-03-29T02:08:56.675473
2019-11-07T14:27:44
2019-11-07T14:27:44
149,422,654
0
0
null
null
null
null
UTF-8
Python
false
false
4,938
py
# -*- coding: utf-8 -*-
"""Tests of SGE job submission"""

import shutil
import time
import unittest

import pytest

from lpbio import pysge


class TestPysge(unittest.TestCase):
    """Class collecting tests for pysge"""

    @staticmethod
    def test_create_job():
        """Create Job for SGE-like scheduler"""
        pysge.Job(name="test_job", command="echo {}".format(time.asctime()))

    @staticmethod
    def test_create_job_dependencies():
        """Create Job with dependencies for SGE-like scheduler"""
        job = pysge.Job(
            name="test_job_dependencies", command="echo {}".format(time.asctime())
        )
        deps = [
            pysge.Job(
                name="dependency {}".format(i), command="echo {}".format(time.asctime())
            )
            for i in range(3)
        ]
        for dep in deps:
            job.add_dependency(dep)

    @staticmethod
    def test_create_jobgroup():
        """Create parameter-sweep JobGroup for SGE-like scheduler"""
        args = {"arg1": ["a", "b", "c"]}
        pysge.JobGroup(name="test_jobgroup", command="echo", arguments=args)

    @staticmethod
    def test_create_jobgroup_dependencies():
        """Create parameter-sweep JobGroup with dependencies for SGE-like scheduler"""
        args = {"arg1": ["a", "b", "c"]}
        jobgroup = pysge.JobGroup(
            name="test_jobgroup_dependencies", command="echo", arguments=args
        )
        deps = [
            pysge.Job(
                name="dependency {}".format(i), command="echo {}".format(time.asctime())
            )
            for i in range(3)
        ]
        for dep in deps:
            jobgroup.add_dependency(dep)

    @pytest.mark.skipif(
        shutil.which(pysge.QSUB_DEFAULT) is None,
        reason="qsub executable ({}) could not be found".format(pysge.QSUB_DEFAULT),
    )
    def test_create_run_job(self):
        """Create and run Job with SGE-like scheduler"""
        job = pysge.Job(
            name="test_run_job",
            command="echo {} \\(test_create_run_job\\)".format(time.asctime()),
        )
        pysge.build_and_submit_jobs(job)

    @pytest.mark.skipif(
        shutil.which(pysge.QSUB_DEFAULT) is None,
        reason="qsub executable ({}) could not be found".format(pysge.QSUB_DEFAULT),
    )
    def test_create_run_job_badname(self):
        """Create and run a Job using SGE-like scheduler

        This job has undesirable characters in the name
        """
        job = pysge.Job(
            name="test run job #|!;,.?",
            command="echo This was a bad name! \\(test_create_run_job_badname\\)",
        )
        pysge.build_and_submit_jobs(job)

    @pytest.mark.skipif(
        shutil.which(pysge.QSUB_DEFAULT) is None,
        reason="qsub executable ({}) could not be found".format(pysge.QSUB_DEFAULT),
    )
    def test_create_run_jobgroup(self):
        """Create and run JobGroup with SGE-like scheduler"""
        args = {"arg1": ["a", "b", "c"]}
        jobgroup = pysge.JobGroup(
            name="test_run_jobgroup",
            command="echo $arg1 \\(test_create_run_jobgroup\\)",
            arguments=args,
        )
        pysge.build_and_submit_jobs(jobgroup)

    @pytest.mark.skipif(
        shutil.which(pysge.QSUB_DEFAULT) is None,
        reason="qsub executable ({}) could not be found".format(pysge.QSUB_DEFAULT),
    )
    def test_create_run_job_dependencies(self):
        """Create and run Job with dependencies for SGE-like scheduler"""
        job = pysge.Job(
            name="test_run_job_dependencies",
            command="echo {} \\(test_create_run_job_dependencies\\)".format(
                time.asctime()
            ),
        )
        deps = [
            pysge.Job(
                name="testjob_dependency_{}".format(i),
                command="echo {}".format(time.asctime()),
            )
            for i in range(3)
        ]
        for dep in deps:
            job.add_dependency(dep)
        pysge.build_and_submit_jobs([job] + deps)

    @pytest.mark.skipif(
        shutil.which(pysge.QSUB_DEFAULT) is None,
        reason="qsub executable ({}) could not be found".format(pysge.QSUB_DEFAULT),
    )
    def test_create_run_jobgroup_dependencies(self):
        """Create parameter-sweep JobGroup with dependencies for SGE-like scheduler"""
        args = {"arg1": ["a", "b", "c"]}
        jobgroup = pysge.JobGroup(
            name="test_run_jobgroup_dependencies",
            command="echo $arg1 \\(test_create_run_jobgroup_dependencies\\)",
            arguments=args,
        )
        deps = [
            pysge.Job(
                name="testjobgroup_dependency_{}".format(i),
                command="echo {}".format(time.asctime()),
            )
            for i in range(3)
        ]
        for dep in deps:
            jobgroup.add_dependency(dep)
        pysge.build_and_submit_jobs([jobgroup] + deps)
[ "leighton.pritchard@hutton.ac.uk" ]
leighton.pritchard@hutton.ac.uk
d2297f166ccb241eecdd47f1c9b46e9d9faad85c
11a246743073e9d2cb550f9144f59b95afebf195
/kattis/chopin.py
67513e777cb40682ed9aa5468bad04aa7e93f38a
[]
no_license
ankitpriyarup/online-judge
b5b779c26439369cedc05c045af5511cbc3c980f
8a00ec141142c129bfa13a68dbf704091eae9588
refs/heads/master
2020-09-05T02:46:56.377213
2019-10-27T20:12:25
2019-10-27T20:12:25
219,959,932
0
1
null
2019-11-06T09:30:58
2019-11-06T09:30:57
null
UTF-8
Python
false
false
538
py
import sys


def main():
    """Read "<note> <tone>" lines from stdin and print enharmonic respellings.

    Notes with two spellings (e.g. A# / Bb) are flipped to the other name;
    any other note is reported as UNIQUE.
    """
    tc = 1
    # BUG FIX: the original seeded flip with pointless 'major'/'minor' keys
    # mapping to empty dicts, so an input note literally named "major" or
    # "minor" would have printed a bogus flipped value.  Keep only the real
    # enharmonic pairs.
    flip = {
        'A#': 'Bb', 'Bb': 'A#',
        'C#': 'Db', 'Db': 'C#',
        'D#': 'Eb', 'Eb': 'D#',
        'F#': 'Gb', 'Gb': 'F#',
        'G#': 'Ab', 'Ab': 'G#',
    }
    for line in sys.stdin:
        note, tone = line.strip().split()
        if note in flip:
            print('Case {}: {} {}'.format(tc, flip[note], tone))
        else:
            print('Case {}: UNIQUE'.format(tc))
        tc += 1


main()
[ "arnavsastry@gmail.com" ]
arnavsastry@gmail.com
e0049f98e7e82d3f7e4ed64035a39d25d6443025
d1c67f2031d657902acef4411877d75b992eab91
/swagger_client/models/list_escalations_response.py
f2b5efd0d2aa78275881ff42e8022c09b496ba2d
[]
no_license
Certn/opsgenie-python
c6e6a7f42394499e5224d679cc9a449042fcf9c3
bd5f402f97d591e4082b38c938cbabca4cf29787
refs/heads/master
2023-01-01T10:45:13.132455
2020-10-27T17:40:01
2020-10-27T17:40:01
307,769,432
0
0
null
null
null
null
UTF-8
Python
false
false
4,621
py
# coding: utf-8

"""
    Opsgenie REST API

    Opsgenie OpenAPI Specification  # noqa: E501

    OpenAPI spec version: 2.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401


class ListEscalationsResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # IMPROVEMENT: the generated code depended on `six` solely for
    # iteritems(); this is a Python 3 file, so plain dict.items() is used
    # and the third-party dependency is dropped.

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'request_id': 'str',
        'took': 'float',
        'data': 'list[Escalation]'
    }

    attribute_map = {
        'request_id': 'requestId',
        'took': 'took',
        'data': 'data'
    }

    def __init__(self, request_id=None, took=0.0, data=None):  # noqa: E501
        """ListEscalationsResponse - a model defined in Swagger"""  # noqa: E501
        self._request_id = None
        self._took = None
        self._data = None
        self.discriminator = None
        self.request_id = request_id
        self.took = took
        if data is not None:
            self.data = data

    @property
    def request_id(self):
        """Gets the request_id of this ListEscalationsResponse.  # noqa: E501

        :return: The request_id of this ListEscalationsResponse.  # noqa: E501
        :rtype: str
        """
        return self._request_id

    @request_id.setter
    def request_id(self, request_id):
        """Sets the request_id of this ListEscalationsResponse.

        :param request_id: The request_id of this ListEscalationsResponse.  # noqa: E501
        :type: str
        """
        if request_id is None:
            raise ValueError("Invalid value for `request_id`, must not be `None`")  # noqa: E501

        self._request_id = request_id

    @property
    def took(self):
        """Gets the took of this ListEscalationsResponse.  # noqa: E501

        :return: The took of this ListEscalationsResponse.  # noqa: E501
        :rtype: float
        """
        return self._took

    @took.setter
    def took(self, took):
        """Sets the took of this ListEscalationsResponse.

        :param took: The took of this ListEscalationsResponse.  # noqa: E501
        :type: float
        """
        if took is None:
            raise ValueError("Invalid value for `took`, must not be `None`")  # noqa: E501

        self._took = took

    @property
    def data(self):
        """Gets the data of this ListEscalationsResponse.  # noqa: E501

        :return: The data of this ListEscalationsResponse.  # noqa: E501
        :rtype: list[Escalation]
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this ListEscalationsResponse.

        :param data: The data of this ListEscalationsResponse.  # noqa: E501
        :type: list[Escalation]
        """
        self._data = data

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(ListEscalationsResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListEscalationsResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
[ "john@oram.ca" ]
john@oram.ca
8fde5030c14e3eb60ee0aae8b332acfc19a8dc8b
758bf41e46a3093f4923af603f1f7f8063408b9c
/website/testFromRemoteRepo/_bsch3398/museum/python/user.py
719bf82b3ee747cef3765681b6702717b4d1b1a9
[]
no_license
mpetyx/mpetyx.com
4033d97b21c9227a6ba505980fd0c1b57254e8fb
d50c379b4fe09e0135656573f7049225fc90ae36
refs/heads/master
2021-01-10T19:50:15.488371
2014-01-22T09:04:14
2014-01-22T09:04:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,103
py
#!/usr/local/bin/python from util import * import connect import sessions #django for templates only from django.conf import settings from django.template import Template, Context methods = ("login", "logout", "add", "delete", "view", "update") def login(fields, cookie): if fields.has_key('user') and fields.has_key('password'): user = fields['user'].value #.value password = fields['password'].value #.value db = connect.connect() temp = db.read('users', {'userid': user}) #print temp # testing # user does exist and password matches if temp and temp[0]['PASSWORD'] == password: # create session cookie sid = sessions.create(user) newcookie = 'id=' + str(sid) # redirect to catalogue menu page t = loader('loggedin') c = Context({}) #TODO print http_response(t.render(c), newcookie) # no match else: t = loader('login') c = Context({'errors': 'Incorrect username or password. Also, I slept with your sister.'}) print http_response(t.render(c)) # go back to login page with error message else: t = loader('login') c = Context({}) print http_response(t.render(c)) def logout(fields, cookie): pass def add(fields, cookie): pass def delete(fields, cookie): pass def view(fields, cookie): pass def update(fields, cookie): pass def run(fields, cookie): if fields.has_key('method'): method = fields['method'].value if method in methods: if method == "login": login(fields, cookie) elif method == "logout": logout(fields, cookie) elif method == "add": add(fields, cookie) elif method == "delete": delete(fields, cookie) elif method == "view": view(fields, cookie) elif method == "update": update(fields, cookie) if __name__ == "__main__": pass
[ "mpetyx@gmail.com" ]
mpetyx@gmail.com
49ef083dd7476bf8ff926498dd04773df7b9d6f1
f7c4084ddb4b26ac6005e569c907e94ce63f9993
/项目/api_sh/data_01/migrations/0001_initial.py
991a987ed1f657a88f79ba7cc71819dc786de534
[]
no_license
LDZ-RGZN/-
01caeb008bab16e1f7dd1c02137def2e030e1636
696be7f7a33f009eac92dff504365eb386060df1
refs/heads/master
2020-04-13T14:56:43.565470
2019-01-13T11:19:28
2019-01-13T11:19:28
162,986,641
0
0
null
null
null
null
UTF-8
Python
false
false
3,352
py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-01-08 11:35
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Initial schema: chapter content, book list, chapters, and categories."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='content',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='章节名称')),
                ('bookname', models.CharField(max_length=150, verbose_name='书名')),
                ('author', models.CharField(max_length=100, verbose_name='作者')),
                ('WordNumber', models.CharField(max_length=100, verbose_name='本章字数')),
                ('FaBuData', models.CharField(max_length=200, verbose_name='发布时间')),
                ('content', models.TextField(verbose_name='内容')),
            ],
            options={
                'verbose_name': '详情',
                'verbose_name_plural': '详情',
            },
        ),
        migrations.CreateModel(
            name='liebiao',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bookname', models.CharField(max_length=200, verbose_name='书名')),
                ('img_url', models.TextField(verbose_name='图片链接')),
                ('author', models.CharField(max_length=100, verbose_name='作者')),
                ('State', models.CharField(max_length=100, verbose_name='状态')),
                ('WordNumber', models.CharField(max_length=100, verbose_name='字数')),
                ('introduce', models.TextField(verbose_name='简介')),
            ],
            options={
                'verbose_name': '列表',
                'verbose_name_plural': '列表',
            },
        ),
        migrations.CreateModel(
            name='zhangjie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='章节名称')),
                ('zj_link', models.TextField(verbose_name='链接')),
                ('mf', models.CharField(max_length=100, verbose_name='状态')),
            ],
            options={
                'verbose_name': '章节',
                'verbose_name_plural': '章节',
            },
        ),
        migrations.CreateModel(
            name='zonglei',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(max_length=100, verbose_name='类别')),
                ('CGurl', models.TextField(verbose_name='类别链接')),
            ],
            options={
                'verbose_name': '总类别',
                'verbose_name_plural': '总类别',
            },
        ),
        migrations.AddField(
            model_name='liebiao',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='data_01.zonglei', verbose_name='类别'),
        ),
    ]
[ "2654213432@qq.com" ]
2654213432@qq.com
a448f9c388862144f132bcfef5eeb24bb2bad601
638af6b8c580eeae23fc1034882c4b514195137a
/Packages/vcs_legacy/Test/test_ps_hatching.py
4716cae2fa5f0c4e53cb057cd6ace51ee97ef4e6
[]
no_license
doutriaux1/uvcdat
83684a86b514b8cac4d8900a503fc13d557fc4d2
37e9635f988696c346b4c3cdb49144d1e21dab5d
refs/heads/master
2021-01-17T07:57:22.897539
2015-02-02T22:52:12
2015-02-02T22:52:12
14,878,320
1
0
null
2015-02-19T20:54:25
2013-12-02T23:44:46
C
UTF-8
Python
false
false
792
py
# Adapted for numpy/ma/cdms2 by convertcdms.py
# Plot clt data as a hatched isofill together with a framing line,
# checking each plot with the support harness.
import cdms2 as cdms
import vcs_legacy
import sys
import time
import support
import os

bg = support.bg

x = vcs_legacy.init()
x.portrait()
#x.setdefaultfont(2)

f = cdms.open(os.path.join(cdms.__path__[0], '..', '..', '..', '..',
                           'sample_data', 'clt.nc'))
s = f('clt')

iso = x.createisofill('my')
levs = range(0, 95, 5)
#print len(levs)
colors = vcs_legacy.getcolors(levs)
hatch = []
iso.levels = levs
iso.fillareacolors = colors
iso.fillareastyle = 'pattern'
# immediately overridden: the test exercises hatch fills
iso.fillareastyle = 'hatch'
iso.fillareaindices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                       11, 12, 13, 14, 15, 16, 17, 18]
#print iso.fillareaindices
#iso.fillareaindices=[17,]*21
#print colors
#iso.list()

# Frame the page with a line just inside the canvas edges.
l = x.createline('my')
l.x = [.001, .999, .999, .001, .001]
l.y = [.001, .001, .999, .999, .001]

x.plot(l, bg=bg)
support.check_plot(x)
x.plot(s, iso, bg=bg)
support.check_plot(x)
[ "doutriaux1@llnl.gov" ]
doutriaux1@llnl.gov
c3fc957bd8157028fc72a63f5e48786b003b968b
321b4ed83b6874eeb512027eaa0b17b0daf3c289
/77/77.combinations.664188670.Wrong-Answer.leetcode.python3.py
f9d4b9c263f19faa1a2c5bbe5831661b9d091441
[]
no_license
huangyingw/submissions
7a610613bdb03f1223cdec5f6ccc4391149ca618
bfac1238ecef8b03e54842b852f6fec111abedfa
refs/heads/master
2023-07-25T09:56:46.814504
2023-07-16T07:38:36
2023-07-16T07:38:36
143,352,065
0
1
null
null
null
null
UTF-8
Python
false
false
353
py
class Solution(object):
    def combine(self, n, k):
        """Return all k-element combinations of the integers 1..n.

        :param n: upper bound of the candidate range (inclusive).
        :param k: size of each combination.
        :return: list of lists, each a strictly increasing combination.
        """
        res = []
        self.get_combine(res, [], n, k, 1)
        return res

    def get_combine(self, res, prefix, n, k, start):
        """Backtracking helper: extend `prefix` with `k` more numbers >= `start`."""
        if k == 0:
            # `prefix` is complete; store a copy and stop this branch.
            res.append(list(prefix))
            return
        for idx in range(start, n + 1):
            # BUG FIX: recurse with `idx + 1`, not `start + 1`.  Passing
            # `start + 1` let later numbers be <= earlier ones, producing
            # invalid combinations such as [2, 2] (the recorded Wrong Answer).
            self.get_combine(res, prefix + [idx], n, k - 1, idx + 1)
[ "huangyingw@gmail.com" ]
huangyingw@gmail.com
35651e65aea695ef813cc1faf53c12e1c4efeff5
cbe264842df4eae3569b28ed4aae9489014ed23c
/codeit/algorithm/greedy_min_fee.py
cc107882b08662f5008275ceceac29457af32609
[ "MIT" ]
permissive
zeroam/TIL
31e176c2f4c3e1ef72b1155353690cc2f7160f96
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
refs/heads/master
2021-07-23T01:43:34.135033
2021-07-10T06:47:17
2021-07-10T06:47:17
167,952,375
1
0
null
null
null
null
UTF-8
Python
false
false
521
py
def min_fee(pages_to_print):
    """Return the minimal total waiting cost for a queue of print jobs.

    Jobs run one at a time; a job that starts when `r` jobs remain (itself
    included) contributes `r * minutes` to the total wait.  Running the
    shortest jobs first (greedy) minimises the sum.

    :param pages_to_print: iterable of per-job durations in minutes.
    :return: minimal total fee (0 for an empty queue).
    """
    # PERF FIX: the original popped from the front of a list in a loop,
    # which is O(n^2); iterating the sorted jobs with a countdown is O(n log n).
    total_fee = 0
    remaining = len(pages_to_print)
    for minutes in sorted(pages_to_print):
        total_fee += remaining * minutes
        remaining -= 1
    return total_fee


if __name__ == '__main__':
    from util import test_value
    test_value(min_fee([6, 11, 4, 1]), 39)
    test_value(min_fee([3, 2, 1]), 10)
    test_value(min_fee([3, 1, 4, 3, 2]), 32)
    test_value(min_fee([8, 4, 2, 3, 9, 23, 6, 8]), 188)
[ "imdff0803@gmail.com" ]
imdff0803@gmail.com
df3c79870df90f3021b5a630eb28b3efd6fa07c0
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
/python/numenta_nupic/nupic-master/tests/unit/nupic/algorithms/sp_overlap_test.py
1ca3af5d62178bc471696f4ea91f01904d2a56cf
[]
no_license
LiuFang816/SALSTM_py_data
6db258e51858aeff14af38898fef715b46980ac1
d494b3041069d377d6a7a9c296a14334f2fa5acc
refs/heads/master
2022-12-25T06:39:52.222097
2019-12-12T08:49:07
2019-12-12T08:49:07
227,546,525
10
7
null
2022-12-19T02:53:01
2019-12-12T07:29:39
Python
UTF-8
Python
false
false
7,048
py
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ This is a legacy test from trunk and may replicate spatial pooler tests. The allocation of cells to new patterns is explored. After all the cells have been allocated, cells must be reused. This test makes sure that the allocation of new cells is such that we achieve maximum generality and predictive power. Note: Since the sp pooler has 2048 cells with a sparsity of 40 cells active per iteration, 100% allocation is reached at the 51st unique pattern. 
""" import unittest2 as unittest import random as rnd import time import numpy from nupic.bindings.math import GetNTAReal from nupic.encoders import scalar from nupic.bindings.algorithms import SpatialPooler realDType = GetNTAReal() SEED = 42 class TestSPFrequency(unittest.TestCase): def testCategory(self): """Test that the most frequent possible option is chosen for a scalar encoded field """ self.frequency(n=100, w=21, seed=SEED, numColors=90, encoder = 'scalar') def testScalar(self): """Test that the most frequent possible option is chosen for a category encoded field """ self.frequency(n=30, w=21, seed=SEED, numColors=90, encoder = 'category') @unittest.skip("Not working...") def testScalarLong(self): """Test that the most frequent possible option is chosen for a scalar encoded field. Run through many different numbers of patterns and random seeds""" for n in [52, 70, 80, 90, 100, 110]: self.frequency(n=100, w=21, seed=SEED, numColors=n, encoder='scalar') @unittest.skip("Not working...") def testCategoryLong(self): """Test that the most frequent possible option is chosen for a category encoded field. Run through many different numbers of patterns and random seeds""" for n in [52, 70, 80, 90, 100, 110]: self.frequency(n=100, w=21, seed=SEED, numColors=n) def frequency(self, n=15, w=7, columnDimensions = 2048, numActiveColumnsPerInhArea = 40, stimulusThreshold = 0, spSeed = 1, spVerbosity = 0, numColors = 2, seed=42, minVal=0, maxVal=10, encoder = 'category', forced=True): """ Helper function that tests whether the SP predicts the most frequent record """ print "\nRunning SP overlap test..." 
print encoder, 'encoder,', 'Random seed:', seed, 'and', numColors, 'colors' #Setting up SP and creating training patterns # Instantiate Spatial Pooler spImpl = SpatialPooler( columnDimensions=(columnDimensions, 1), inputDimensions=(1, n), potentialRadius=n/2, numActiveColumnsPerInhArea=numActiveColumnsPerInhArea, spVerbosity=spVerbosity, stimulusThreshold=stimulusThreshold, potentialPct=0.5, seed=spSeed, globalInhibition=True, ) rnd.seed(seed) numpy.random.seed(seed) colors = [] coincs = [] reUsedCoincs = [] spOutput = [] patterns = set([]) # Setting up the encodings if encoder=='scalar': enc = scalar.ScalarEncoder(name='car', w=w, n=n, minval=minVal, maxval=maxVal, periodic=False, forced=True) # forced: it's strongly recommended to use w>=21, in the example we force skip the check for readibility for y in xrange(numColors): temp = enc.encode(rnd.random()*maxVal) colors.append(numpy.array(temp, dtype=realDType)) else: for y in xrange(numColors): sdr = numpy.zeros(n, dtype=realDType) # Randomly setting w out of n bits to 1 sdr[rnd.sample(xrange(n), w)] = 1 colors.append(sdr) # Training the sp print 'Starting to train the sp on', numColors, 'patterns' startTime = time.time() for i in xrange(numColors): # TODO: See https://github.com/numenta/nupic/issues/2072 spInput = colors[i] onCells = numpy.zeros(columnDimensions) spImpl.compute(spInput, True, onCells) spOutput.append(onCells.tolist()) activeCoincIndices = set(onCells.nonzero()[0]) # Checking if any of the active cells have been previously active reUsed = activeCoincIndices.intersection(patterns) if len(reUsed) == 0: # The set of all coincidences that have won at least once coincs.append((i, activeCoincIndices, colors[i])) else: reUsedCoincs.append((i, activeCoincIndices, colors[i])) # Adding the active cells to the set of coincs that have been active at # least once patterns.update(activeCoincIndices) if (i + 1) % 100 == 0: print 'Record number:', i + 1 print "Elapsed time: %.2f seconds" % (time.time() - 
startTime) print len(reUsedCoincs), "re-used coinc(s)," # Check if results match expectations summ = [] for z in coincs: summ.append(sum([len(z[1].intersection(y[1])) for y in reUsedCoincs])) zeros = len([x for x in summ if x==0]) factor = max(summ)*len(summ)/sum(summ) if len(reUsed) < 10: self.assertLess(factor, 41, "\nComputed factor: %d\nExpected Less than %d" % ( factor, 41)) self.assertLess(zeros, 0.99*len(summ), "\nComputed zeros: %d\nExpected Less than %d" % ( zeros, 0.99*len(summ))) else: self.assertLess(factor, 8, "\nComputed factor: %d\nExpected Less than %d" % ( factor, 8)) self.assertLess(zeros, 12, "\nComputed zeros: %d\nExpected Less than %d" % ( zeros, 12)) def hammingDistance(s1, s2): assert len(s1) == len(s2) return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2)) if __name__ == '__main__': unittest.main()
[ "659338505@qq.com" ]
659338505@qq.com
d56407a6b0db2c1f4b194d83c021e874ad9414d2
4e7db10524c938c8c6e687521def2889e20ec646
/P16/1-7.py
62f18ba79452ebca3058388918cdc5cc264b5f6c
[]
no_license
mpigrobot/python
e5cf60ca438e0d5e63a1e87a266a9e255bc07271
bf9262657a7401f37de38318db768e630fab97a9
refs/heads/master
2020-03-15T15:37:03.228080
2018-03-31T07:33:07
2018-03-31T07:33:07
null
0
0
null
null
null
null
UTF-8
Python
false
false
520
py
#!/usr/bin/env python # coding:utf-8 import sys sys.path.append("../") import jieba print ", ".join(jieba.cut("大连美容美发学校中君意是你值得信赖的选择")) jieba.load_userdict( 'C:\Users\Administrator\Desktop\dir1.txt') print ", ".join(jieba.cut("大连美容美发学校中君意是你值得信赖的选择")) # jieba.suggest_freq('君意', True) jieba.add_word("君意", freq = 20000, tag = None) print ", ".join(jieba.cut("大连美容美发学校中君意是你值得信赖的选择"))
[ "noreply@github.com" ]
mpigrobot.noreply@github.com
eda674ee22d94b233e6ae2676f25b8efc5cdcd5b
ca5e3595a9949abba08c642842166f82d768c153
/modulation.py
f70357fbf07fdbf03eccb00b5de4dd5781fa470c
[]
no_license
MaksimKulya/PromaX
f36980300ec564a0c8d523f3e07ebf9ed2a9b262
e723cf4390f9cdcc4bbda627dff90a2185322a54
refs/heads/master
2023-06-25T13:13:09.968726
2021-07-13T15:09:37
2021-07-13T15:09:37
385,638,675
0
0
null
null
null
null
UTF-8
Python
false
false
612
py
import matplotlib.pyplot as plt
import math
import cmath
import pylab
from matplotlib import mlab
from PIL import Image
import numpy as np
import scipy.constants
import numba as nb

pi=math.pi

@nb.njit
def modulation(Nx,Ny,am,ph,nu,n,c,h):
    """Build the per-frequency complex transmission function of an object.

    For every frequency nu[k], the amplitude map `am` is reused unchanged and
    the phase map `ph` is scaled by 2*pi*nu[k]*(n-1)*h/c (phase delay through
    a layer of thickness `h` and refractive index `n`; `c` is presumably the
    speed of light — TODO confirm units with the caller).

    Returns a complex64 array of shape (len(nu), Nx, Ny):
        G_object[k] = am * exp(1j * ph * 2*pi*nu[k]*(n-1)*h/c)
    Compiled with numba @njit; keep the explicit loop — it is what njit
    optimises, and nb.types.complex64 is a numba-specific dtype.
    """
    AM = np.zeros(shape=(nu.shape[0], Nx, Ny))
    PH = np.zeros(shape=(nu.shape[0], Nx, Ny))
    G_object = np.zeros(shape=(nu.shape[0], Nx, Ny), dtype=nb.types.complex64)
    for k in range(nu.shape[0]):
        # Amplitude is frequency-independent; phase scales linearly with nu[k].
        AM[k, :, :] = am
        PH[k, :, :] = ph * (2*pi*nu[k]*(n-1)*h/c)
        G_object[k, :, :] = AM[k, :, :] * np.exp(1j * PH[k, :, :])
    return G_object
[ "maxk2350@yandex.ru" ]
maxk2350@yandex.ru
182d7ea8a9a297586f8be5758698b81f665b8e65
5c7da7dabdc076ad7113ccd20561a8bbf5f9a70e
/portfolios/migrations/0007_auto_20200215_1347.py
9bae61e5b98ca60a0561156c6c192a93b6cb9cd2
[]
no_license
aqcloudacio/cloudaciofeez
2499fb5fc5334fa871daab2abea6c34bfa8c7667
8399560ece9aa10a6d6801f42c027dca26a65936
refs/heads/master
2023-02-27T22:36:20.501159
2021-02-11T00:03:46
2021-02-11T00:03:46
337,887,413
0
0
null
null
null
null
UTF-8
Python
false
false
788
py
# Generated by Django 2.2.7 on 2020-02-15 02:47 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('portfolios', '0006_auto_20200215_1344'), ] operations = [ migrations.AlterField( model_name='portfolio', name='platform', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='portfolios', to='platforms.Platform'), ), migrations.AlterField( model_name='portfolio', name='platform_fee_group', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='portfolios', to='platforms.PlatformFees'), ), ]
[ "alejandro.quintero@clouxter.com" ]
alejandro.quintero@clouxter.com
855afa2e49764022f6489b06d28787448896f33d
84a96dbd96e926ebb5c658e3cb897db276c32d6c
/tensorflow/python/keras/optimizer_v2/adamax.py
9166f637c1e9a7f1cefb35436a6db667ff59ab84
[ "Apache-2.0" ]
permissive
MothCreations/gavlanWheels
bc9189092847369ad291d1c7d3f4144dd2239359
01d8a43b45a26afec27b971f686f79c108fe08f9
refs/heads/master
2022-12-06T09:27:49.458800
2020-10-13T21:56:40
2020-10-13T21:56:40
249,206,716
6
5
Apache-2.0
2022-11-21T22:39:47
2020-03-22T14:57:45
C++
UTF-8
Python
false
false
8,043
py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adamax for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.keras import backend_config from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.training import training_ops from tensorflow.python.util.tf_export import keras_export @keras_export('keras.optimizers.Adamax') class Adamax(optimizer_v2.OptimizerV2): """Optimizer that implements the Adamax algorithm. It is a variant of Adam based on the infinity norm. Default parameters follow those provided in the paper. Adamax is sometimes superior to adam, specially in models with embeddings. 
Initialization: ``` m_0 <- 0 (Initialize initial 1st moment vector) v_0 <- 0 (Initialize the exponentially weighted infinity norm) t <- 0 (Initialize timestep) ``` The update rule for `variable` with gradient `g` uses an optimization described at the end of section 7.1 of the paper: ``` t <- t + 1 m_t <- beta1 * m_{t-1} + (1 - beta1) * g v_t <- max(beta2 * v_{t-1}, abs(g)) variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) ``` Similar to AdamOptimizer, the epsilon is added for numerical stability (especially to get rid of division by zero when v_t = 0). Contrast to AdamOptimizer, the sparse implementation of this algorithm (used when the gradient is an IndexedSlices object, typically because of `tf.gather` or an embedding lookup in the forward pass) only updates variable slices and corresponding `m_t`, `v_t` terms when that part of the variable was used in the forward pass. This means that the sparse behavior is contrast to the dense behavior (similar to some momentum implementations which ignore momentum unless a variable slice was actually used). References see Section 7 of [Kingma et al., 2014](http://arxiv.org/abs/1412.6980) ([pdf](http://arxiv.org/pdf/1412.6980.pdf)). """ _HAS_ALL_REDUCE_SUM_GRAD = True def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, name='Adamax', **kwargs): """Construct a new Adamax optimizer. Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate. beta_1: A float value or a constant float tensor. The exponential decay rate for the 1st moment estimates. beta_2: A float value or a constant float tensor. The exponential decay rate for the exponentially weighted infinity norm. epsilon: A small constant for numerical stability. name: Optional name for the operations created when applying gradients. Defaults to "Adamax". **kwargs: keyword arguments. 
Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. """ super(Adamax, self).__init__(name, **kwargs) self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) self._set_hyper('decay', self._initial_decay) self._set_hyper('beta_1', beta_1) self._set_hyper('beta_2', beta_2) self.epsilon = epsilon or backend_config.epsilon() def _create_slots(self, var_list): # Separate for-loops to respect the ordering of slot variables from v1. for var in var_list: self.add_slot(var, 'm') # Create slots for the first moments. for var in var_list: self.add_slot(var, 'v') # Create slots for the second moments. def _prepare_local(self, var_device, var_dtype, apply_state): super(Adamax, self)._prepare_local(var_device, var_dtype, apply_state) local_step = math_ops.cast(self.iterations + 1, var_dtype) beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype)) beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype)) beta_1_power = math_ops.pow(beta_1_t, local_step) lr_t = apply_state[(var_device, var_dtype)]['lr_t'] apply_state[(var_device, var_dtype)].update( dict( neg_scaled_lr=-lr_t / (1 - beta_1_power), epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype), beta_1_t=beta_1_t, beta_1_power=beta_1_power, one_minus_beta_1_t=1 - beta_1_t, beta_2_t=beta_2_t, zero=array_ops.zeros((), dtype=dtypes.int64))) def _resource_apply_dense(self, grad, var, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)) m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') return training_ops.resource_apply_ada_max( var.handle, m.handle, v.handle, coefficients['beta_1_power'], 
coefficients['lr_t'], coefficients['beta_1_t'], coefficients['beta_2_t'], coefficients['epsilon'], grad, use_locking=self._use_locking) def _resource_apply_sparse(self, grad, var, indices, apply_state=None): var_device, var_dtype = var.device, var.dtype.base_dtype coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)) # m_t = beta1 * m + (1 - beta1) * g_t m = self.get_slot(var, 'm') m_slice = array_ops.gather(m, indices, axis=coefficients['zero']) m_t_slice = (m_slice * coefficients['beta_1_t'] + grad * coefficients['one_minus_beta_1_t']) with ops.control_dependencies([m_t_slice]): m_t = self._resource_scatter_update(m, indices, m_t_slice) # u_t = max(beta2 * u, abs(g_t)) v = self.get_slot(var, 'v') v_slice = array_ops.gather(v, indices, axis=coefficients['zero']) v_t_slice = math_ops.maximum(v_slice * coefficients['beta_2_t'], math_ops.abs(grad)) with ops.control_dependencies([v_t_slice]): v_t = self._resource_scatter_update(v, indices, v_t_slice) # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t var_slice = coefficients['neg_scaled_lr'] * ( m_t_slice / (v_t_slice + coefficients['epsilon'])) with ops.control_dependencies([var_slice]): var_update = self._resource_scatter_add(var, indices, var_slice) return control_flow_ops.group(*[var_update, m_t, v_t]) def get_config(self): config = super(Adamax, self).get_config() config.update({ 'learning_rate': self._serialize_hyperparameter('learning_rate'), 'decay': self._serialize_hyperparameter('decay'), 'beta_1': self._serialize_hyperparameter('beta_1'), 'beta_2': self._serialize_hyperparameter('beta_2'), 'epsilon': self.epsilon, }) return config
[ "gardener@tensorflow.org" ]
gardener@tensorflow.org
5e873a127a5b816d0d8b4e502262fb066ca2608d
2d82d4c6574bd6d32f2cf1c781615f7951f55f66
/muntjac/demo/sampler/features/trees/TreeSingleSelect.py
d07a2e355077b83ac72f563a77bb096ebf2612c1
[ "Apache-2.0" ]
permissive
metaperl/muntjac
f83f745ee03942a61af92ee7fba7285aa9c46f3c
8db97712edd81b4d25deaaa48587d2a08010f2c8
refs/heads/master
2021-01-15T22:04:25.057862
2012-11-09T03:52:59
2012-11-09T03:52:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
887
py
from muntjac.ui.tree import Tree
from muntjac.demo.sampler.features.trees.TreeMultiSelect import TreeMultiSelect
from muntjac.demo.sampler.features.trees.TreeActions import TreeActions
from muntjac.demo.sampler.APIResource import APIResource
from muntjac.demo.sampler.Feature import Feature, Version


class TreeSingleSelect(Feature):
    """Sampler feature describing a single-selection Tree example."""

    def getSinceVersion(self):
        """Report the release this feature first appeared in."""
        return Version.OLD

    def getName(self):
        """Human-readable title shown in the sampler index."""
        return 'Tree, single selection'

    def getDescription(self):
        """Short blurb explaining the example's behaviour."""
        description = ('In this example, you can select any single tree node and '
                       'modify its \'name\' property. Click again to de-select.')
        return description

    def getRelatedAPI(self):
        """API docs linked from this feature's page."""
        related = [APIResource(Tree)]
        return related

    def getRelatedFeatures(self):
        """Other sampler features shown as "related"."""
        return [TreeMultiSelect, TreeActions]

    def getRelatedResources(self):
        """No external resources for this feature."""
        # TODO Auto-generated method stub
        return None
[ "r.w.lincoln@gmail.com" ]
r.w.lincoln@gmail.com
3fb6faabd50d6e4fec8f682bbab921a976447f7b
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
/IronPythonStubs/release/stubs.min/Autodesk/Revit/UI/__init___parts/TaskDialogResult.py
6464317c49a8ed24e79672214c635e272cf50cb8
[ "MIT" ]
permissive
shnlmn/Rhino-Grasshopper-Scripts
a9411098c5d1bbc55feb782def565d535b27b709
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
refs/heads/master
2020-04-10T18:59:43.518140
2020-04-08T02:49:07
2020-04-08T02:49:07
161,219,695
11
2
null
null
null
null
UTF-8
Python
false
false
1,153
py
# NOTE(review): auto-generated IronPython (.NET) API stub for IntelliSense
# only — it is never executed.  `None=None` and the bare `Enum`/`IComparable`
# base names are only valid in the IronPython 2 stub environment; this file
# is NOT CPython 3 compatible and should not be imported as regular code.
class TaskDialogResult(Enum,IComparable,IFormattable,IConvertible):
 """
 Enum to specify the task dialog result.

 enum TaskDialogResult,values: Cancel (2),Close (8),CommandLink1 (1001),CommandLink2 (1002),CommandLink3 (1003),CommandLink4 (1004),No (7),None (0),Ok (1),Retry (4),Yes (6)
 """
 def __eq__(self,*args):
  """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
  pass
 def __format__(self,*args):
  """ __format__(formattable: IFormattable,format: str) -> str """
  pass
 def __ge__(self,*args):
  pass
 def __gt__(self,*args):
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __le__(self,*args):
  pass
 def __lt__(self,*args):
  pass
 def __ne__(self,*args):
  pass
 def __reduce_ex__(self,*args):
  pass
 def __str__(self,*args):
  pass
 # Enum members; the generator assigns None as a placeholder value.
 Cancel=None
 Close=None
 CommandLink1=None
 CommandLink2=None
 CommandLink3=None
 CommandLink4=None
 No=None
 None=None
 Ok=None
 Retry=None
 value__=None
 Yes=None
[ "magnetscoil@gmail.com" ]
magnetscoil@gmail.com
a6a2cdc64be78791ddd99b63741b386489d36ecf
5746d26f891270c1bb407a244d9a942534298d96
/fastreid/data/build.py
e7005a90fcb391336d9acc7f3280546059c9cbf6
[ "Apache-2.0" ]
permissive
winterxx/fast-reid
1463253c43876249dd55a3adb0a3e71fa8037aa3
727a7468311949efbbc7be360c2c1afaf440bb22
refs/heads/master
2022-10-20T17:50:39.245472
2020-06-16T03:46:22
2020-06-16T03:46:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,131
py
# encoding: utf-8
"""
@author: l1aoxingyu
@contact: sherlockliao01@gmail.com
"""

import torch
# NOTE(review): `torch._six` was removed in modern PyTorch (>=1.9); this file
# only imports on the old versions the project pinned — confirm before upgrading.
from torch._six import container_abcs, string_classes, int_classes
from torch.utils.data import DataLoader

from . import samplers
from .common import CommDataset
from .datasets import DATASET_REGISTRY
from .transforms import build_transforms


def build_reid_train_loader(cfg):
    """Build the training DataLoader: concatenates all cfg.DATASETS.NAMES
    datasets, relabels identities, and picks a sampler per cfg flags."""
    train_transforms = build_transforms(cfg, is_train=True)

    train_items = list()
    for d in cfg.DATASETS.NAMES:
        dataset = DATASET_REGISTRY.get(d)(combineall=cfg.DATASETS.COMBINEALL)
        dataset.show_train()
        train_items.extend(dataset.train)

    # relabel=True remaps person ids to a contiguous range over the combined set.
    train_set = CommDataset(train_items, train_transforms, relabel=True)

    num_workers = cfg.DATALOADER.NUM_WORKERS
    batch_size = cfg.SOLVER.IMS_PER_BATCH
    num_instance = cfg.DATALOADER.NUM_INSTANCE

    # PK sampling draws P identities x K instances per batch; otherwise use
    # an infinite shuffled stream over the whole set.
    if cfg.DATALOADER.PK_SAMPLER:
        if cfg.DATALOADER.NAIVE_WAY:
            data_sampler = samplers.NaiveIdentitySampler(train_set.img_items, batch_size, num_instance)
        else:
            data_sampler = samplers.BalancedIdentitySampler(train_set.img_items, batch_size, num_instance)
    else:
        data_sampler = samplers.TrainingSampler(len(train_set))
    # drop_last=True: keep batch shapes fixed for the identity samplers.
    batch_sampler = torch.utils.data.sampler.BatchSampler(data_sampler, batch_size, True)

    train_loader = torch.utils.data.DataLoader(
        train_set,
        num_workers=num_workers,
        batch_sampler=batch_sampler,
        collate_fn=fast_batch_collator,
    )
    return train_loader


def build_reid_test_loader(cfg, dataset_name):
    """Build the evaluation DataLoader over query + gallery of one dataset.

    Returns (loader, num_query) — the first num_query items of the loader's
    dataset are the query images, the rest the gallery.
    """
    test_transforms = build_transforms(cfg, is_train=False)

    dataset = DATASET_REGISTRY.get(dataset_name)()
    dataset.show_test()
    test_items = dataset.query + dataset.gallery

    # relabel=False: evaluation needs the original identity labels.
    test_set = CommDataset(test_items, test_transforms, relabel=False)

    num_workers = cfg.DATALOADER.NUM_WORKERS
    batch_size = cfg.TEST.IMS_PER_BATCH
    data_sampler = samplers.InferenceSampler(len(test_set))
    # drop_last=False so every test image is evaluated exactly once.
    batch_sampler = torch.utils.data.BatchSampler(data_sampler, batch_size, False)
    test_loader = DataLoader(
        test_set,
        batch_sampler=batch_sampler,
        num_workers=num_workers,
        collate_fn=fast_batch_collator)
    return test_loader, len(dataset.query)


def trivial_batch_collator(batch):
    """
    A batch collator that does nothing.
    """
    return batch


def fast_batch_collator(batched_inputs):
    """
    A simple batch collator for most common reid tasks
    """
    # Dispatch on the type of the first element; all elements are assumed to
    # share that type (standard DataLoader contract).
    elem = batched_inputs[0]
    if isinstance(elem, torch.Tensor):
        # Stack tensors into a preallocated (N, *shape) batch.
        out = torch.zeros((len(batched_inputs), *elem.size()), dtype=elem.dtype)
        for i, tensor in enumerate(batched_inputs):
            out[i] += tensor
        return out

    elif isinstance(elem, container_abcs.Mapping):
        # Recurse per key so dict samples become dicts of batched values.
        return {key: fast_batch_collator([d[key] for d in batched_inputs]) for key in elem}

    elif isinstance(elem, float):
        return torch.tensor(batched_inputs, dtype=torch.float64)
    elif isinstance(elem, int_classes):
        return torch.tensor(batched_inputs)
    elif isinstance(elem, string_classes):
        return batched_inputs
    # NOTE(review): any other element type falls through and returns None.
[ "sherlockliao01@gmail.com" ]
sherlockliao01@gmail.com
5350c7e34a18d8cebb9c9bcc45be9ec798fde418
684a7d56589f7b96002646dfc26ba2de52eb7d80
/source/callback/eval_mscoco.py
ecc1b5db5b7e1be53cffa13367cc079c536f8f70
[ "Apache-2.0" ]
permissive
adewin/lambda-deep-learning-demo
7a42b935ca1ab1e92a0170bf28c7e526cffa5cb6
ebbbd63c0abf87a1a4155b17cef145039b7a1ef7
refs/heads/master
2020-07-08T13:15:51.476791
2019-04-26T21:25:44
2019-04-26T21:25:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,269
py
""" Copyright 2018 Lambda Labs. All Rights Reserved. Licensed under ========================================================================== """ import os import numpy as np from scipy import misc import tensorflow as tf from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from .callback import Callback DATASET_DIR = "/mnt/data/data/mscoco" # DATASET_META = "val2017" DATASET_META = "val2014" COCO_ID_MAP = np.asarray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]) class EvalMSCOCO(Callback): def __init__(self, config): super(EvalMSCOCO, self).__init__(config) self.detection = [] self.image_ids = [] def before_run(self, sess): self.graph = tf.get_default_graph() def after_run(self, sess): print("Detection Finished ...") # for item in self.detection: # print(item) if len(self.detection) > 0: annotation_file = os.path.join( DATASET_DIR, "annotations", "instances_" + DATASET_META + ".json") coco = COCO(annotation_file) coco_results = coco.loadRes(self.detection) # DETECTION_FILE = "/home/ubuntu/data/mscoco/results/SSD_512x512_score/detections_minival_ssd512_results.json" # coco_results = coco.loadRes(DETECTION_FILE) cocoEval = COCOeval(coco, coco_results, "bbox") cocoEval.params.imgIds = self.image_ids cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() else: print("Found no valid detection. 
Consider re-train your model.") def after_step(self, sess, outputs_dict, feed_dict=None): num_images = len(outputs_dict["image_id"]) # print(num_images) # print('----------------------') for i in range(num_images): file_name = outputs_dict["file_name"][i][0] # print(file_name) num_detections = len(outputs_dict["labels"][i]) translation = outputs_dict["translations"][i] scale = outputs_dict["scales"][i] input_image = misc.imread(file_name) h, w = input_image.shape[:2] # COCO evaluation is based on per detection for d in range(num_detections): box = outputs_dict["bboxes"][i][d] box = box * [float(w), float(h), float(w), float(h)] box[0] = np.clip(box[0], 0, w) box[1] = np.clip(box[1], 0, h) box[2] = np.clip(box[2], 0, w) box[3] = np.clip(box[3], 0, h) box[2] = box[2] - box[0] box[3] = box[3] - box[1] result = { "image_id": outputs_dict["image_id"][i][0], "category_id": COCO_ID_MAP[outputs_dict["labels"][i][d]], "bbox": box, "score": outputs_dict["scores"][i][d] } self.detection.append(result) self.image_ids.append(outputs_dict["image_id"][i][0]) def build(config): return EvalMSCOCO(config)
[ "cl.chuanli@gmail.com" ]
cl.chuanli@gmail.com
d7a7d2dea431fd8d3dc35b48022975e66ec20183
f281d0d6431c1b45c6e5ebfff5856c374af4b130
/DAY001~099/DAY46-BOJ2533-사회망 서비스(SNS)/younghoon.py
38fcc0c3116cb0e7f54d7a6dd1b9073362b4c486
[]
no_license
tachyon83/code-rhino
ec802dc91dce20980fac401b26165a487494adb4
b1af000f5798cd12ecdab36aeb9c7a36f91c1101
refs/heads/master
2022-08-13T09:10:16.369287
2022-07-30T11:27:34
2022-07-30T11:27:34
292,142,812
5
6
null
null
null
null
UTF-8
Python
false
false
634
py
'''
(translated) "I didn't fully understand this — need to study more DP before revisiting."

BOJ 2533: compute the minimum number of "early adopter" vertices in a tree.
The code computes the maximum independent set (MIS) of the tree with a
post-order DFS, then prints N - MIS (the complement is a minimum vertex
cover, i.e. the minimum early-adopter set).
'''
import sys
# Deep recursion over a path-like tree can exceed the default limit.
sys.setrecursionlimit(10**9)
N=int(sys.stdin.readline())
Tree=[[] for _ in range(N+1)]  # adjacency list, 1-indexed vertices
check=[0 for _ in range(N+1)]
# Read the N-1 undirected tree edges.
for _ in range(N-1):
    u,v=map(int,sys.stdin.readline().split())
    Tree[u].append(v)
    Tree[v].append(u)
# DP[v][0] = size of the largest independent set in v's subtree with v included
# DP[v][1] = same with v excluded
DP=[[0,0] for _ in range(N+1)]
# NOTE(review): `check` is rebound here, shadowing the int list built above.
check=[True for _ in range(N+1)]
def DFS(cur):
    # Post-order traversal; `check` marks unvisited vertices (the tree is
    # undirected, so this keeps us from walking back to the parent).
    check[cur]=False
    DP[cur][0]=1
    DP[cur][1]=0
    for i in Tree[cur]:
        if check[i]:
            DFS(i)
            # If cur is in the set, children must be excluded; otherwise
            # each child independently takes its better option.
            DP[cur][0]+=DP[i][1]
            DP[cur][1]+=max(DP[i][0],DP[i][1])
DFS(1)
# Answer: total vertices minus the maximum independent set.
print(N-max(DP[1][0],DP[1][1]))
[ "noreply@github.com" ]
tachyon83.noreply@github.com
9a74952d6bcbbe9d6c9c34e92a33ccbe56808a6b
70e9a7da3d4e2a41b30544516e166dab2495253c
/payment_trustcode/controllers/main.py
050edccfc195b6c8248b0c16dcd7b39d3caf7dfa
[ "MIT" ]
permissive
Trust-Code/odoo-brasil
bf06ea58a4e0376cb5c297c18bf48eaf97104e54
d456a10e32f56e259061afbd989942ea1aae2c2d
refs/heads/16.0
2023-08-31T16:06:21.038792
2023-01-26T19:31:31
2023-01-26T19:31:31
72,882,959
206
253
MIT
2023-08-18T17:05:49
2016-11-04T20:28:03
Python
UTF-8
Python
false
false
757
py
import logging

from odoo import http
from odoo.http import request
from werkzeug.utils import redirect

_logger = logging.getLogger(__name__)


class IuguController(http.Controller):
    """HTTP endpoints for the Iugu payment acquirer (webhook + redirect)."""

    # Public notification URL registered with Iugu.
    _notify_url = '/iugu/notificacao/'

    @http.route(
        '/iugu/notificacao/', type='http', auth="none",
        methods=['GET', 'POST'], csrf=False)
    def iugu_form_feedback(self, **post):
        # Webhook handler: forward Iugu's payload to the generic payment
        # transaction feedback mechanism.  csrf=False/auth="none" because the
        # request originates from Iugu's servers, not a logged-in user.
        request.env['payment.transaction'].sudo().form_feedback(post, 'iugu')
        return "<status>OK</status>"

    @http.route(
        '/iugu/checkout/redirect', type='http', auth='none',
        methods=['GET', 'POST'])
    def iugu_checkout_redirect(self, **post):
        post = post  # NOTE(review): no-op assignment, likely leftover
        # Redirect the shopper to the Iugu-hosted checkout page.
        if 'secure_url' in post:
            return redirect(post['secure_url'])
        # NOTE(review): falls through returning None when 'secure_url' is
        # missing, which yields an empty HTTP response — confirm intended.
[ "danimaribeiro@gmail.com" ]
danimaribeiro@gmail.com
18480acf2489cd737fa1a54137dc34a18873c149
fc43470de13ff8f03105efc2a3660a1ed6a1a553
/LeetCode/207_CourseSchedule.py
168da2eb51e923ae31c007d923cd1b462ab06a0c
[]
no_license
youseop/Problem_solutions
5a05597f188b4ef8f7d8483b46bf05fbf2158d01
1fba638d9520bca4354bca01f194f80b159e26aa
refs/heads/master
2023-06-24T05:12:45.060086
2021-07-24T14:22:33
2021-07-24T14:22:33
298,317,735
1
0
null
null
null
null
UTF-8
Python
false
false
841
py
class Solution:
    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """Return True iff all courses can be taken, i.e. the prerequisite
        graph contains no cycle (Kahn's topological-sort algorithm).

        Each pair [a, b] means course b must be taken before course a.
        """
        from collections import deque as dq
        # In-degree per course and adjacency list from prerequisite -> dependents.
        indegree = [0] * numCourses
        dependents = {course: [] for course in range(numCourses)}
        for course_id, prereq_id in prerequisites:
            indegree[course_id] += 1
            dependents[prereq_id].append(course_id)

        # Seed the queue with every course that has no prerequisites.
        queue = dq(course for course in range(numCourses) if indegree[course] == 0)
        taken = len(queue)

        # Peel off zero-in-degree courses; each removal may unlock dependents.
        while queue:
            finished = queue.popleft()
            for nxt in dependents[finished]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    queue.append(nxt)
                    taken += 1

        # Every course was scheduled exactly when the graph is acyclic.
        return taken == numCourses
[ "66366941+youseop@users.noreply.github.com" ]
66366941+youseop@users.noreply.github.com
aa316048e3e95c342aba666bf410bbb7cf9b543b
a667b52cb8d2ec857c55d33f04fc0e81d36dc681
/options/data/real/Reco16_Run179101_DV.py
4672a9ee45d24e2d7f95dfea3e8b6b5847ee3163
[]
no_license
wenyanyin/CP_violation_simulation
639d73333a3795654275cb43cc7dad7c742d1be1
7b93b2fe1050fb30d0b809b758cd5a3b2824b875
refs/heads/master
2022-04-29T14:19:23.744004
2022-04-01T13:05:18
2022-04-01T13:05:18
168,570,282
0
0
null
null
null
null
UTF-8
Python
false
false
286
py
# DaVinci job options for reconstructing 2016 real data (RDST input).
from Configurables import DaVinci
from Gaudi.Configuration import importOptions

DaVinci().DataType = '2016'
DaVinci().InputType = 'RDST'
# Detector-description and conditions database tags for this processing.
DaVinci().DDDBtag = 'dddb-20150724'
DaVinci().CondDBtag = 'cond-20170325'
# Standard AppConfig options; presumably re-packs raw event banks from the
# 0.3 layout to 4.2 before processing -- confirm against AppConfig docs.
importOptions('$APPCONFIGOPTS/DaVinci/DV-RawEventJuggler-0_3-to-4_2.py')
[ "Michael.Alexander@glasgow.ac.uk" ]
Michael.Alexander@glasgow.ac.uk
65b74da4eede267474cc9a56a3e2901994358f6a
06c367fe2d2233c6efb64f323e15bebd7f48c625
/saleor/product/urls.py
c3159a1923f41052ea09b230dba75e681991402d
[ "BSD-3-Clause" ]
permissive
AkioSky/FishMart
ce630bc4addf63bc105e4f3e13e92c15b119b558
1d01d7e79812dc7cccb1b26ffc6457af6104d9f2
refs/heads/master
2022-12-11T16:13:38.277080
2019-04-22T03:44:22
2019-04-22T03:44:22
182,615,627
0
0
BSD-3-Clause
2022-12-08T01:44:37
2019-04-22T03:20:03
Python
UTF-8
Python
false
false
585
py
from django.conf.urls import url

from . import views

# URL routes for the product app.  Every pattern is anchored with '^' so a
# match must start at the beginning of the remaining URL path.
urlpatterns = [
    url(r'^$', views.product_list, name='list'),
    url(r'^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/$',
        views.product_details, name='details'),
    url(r'^category/(?P<slug>[a-z0-9-_]+?)-(?P<category_id>[0-9]+)/$',
        views.category_index, name='category'),
    # Fixed: this pattern was the only one missing the leading '^' anchor,
    # so it could match mid-path; anchored for consistency with its siblings.
    url(r'^(?P<slug>[a-z0-9-_]+?)-(?P<product_id>[0-9]+)/add/$',
        views.product_add_to_cart, name='add-to-cart'),
    url(r'^collection/(?P<slug>[a-z0-9-_/]+?)-(?P<pk>[0-9]+)/$',
        views.collection_index, name='collection')]
[ "whitebirdinbluesky1990@gmail.com" ]
whitebirdinbluesky1990@gmail.com
5221a3cd94c2ccee498f920816799911eaf1cb15
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/380/usersdata/277/103384/submittedfiles/principal.py
e2edee6fa3b2efd552caa85372bac86a66d41972
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
522
py
# Step 1: read the dimensions of an m x n matrix of integers.
# The two validation loops below were duplicated verbatim in the original;
# factored into a single helper so the prompt is the only difference.


def _read_dimension(prompt):
    """Prompt repeatedly until the user enters an integer in [1, 100]."""
    while True:
        value = int(input(prompt))
        if 1 <= value <= 100:
            return value
        print('Numero invalido. Digite entre 1 e 100 (inclusive)')


m = _read_dimension('Digite a quantidade de linhas: ')
n = _read_dimension('Digite a quantidade de colunas: ')

# Step 2 (TODO): receive an itinerary to traverse - a list of integers.
# Step 3 (TODO): compute the cost of the given route.
[ "rafael.mota@ufca.edu.br" ]
rafael.mota@ufca.edu.br
3aacfc56559b1cc3148120eb9b169bee48276dd4
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
/sdBs/AllRun/pg_1536+690/sdB_pg_1536+690_lc.py
110380af2de8d87ee8d50b1b95d32ee08d79b9b4
[]
no_license
tboudreaux/SummerSTScICode
73b2e5839b10c0bf733808f4316d34be91c5a3bd
4dd1ffbb09e0a599257d21872f9d62b5420028b0
refs/heads/master
2021-01-20T18:07:44.723496
2016-08-08T16:49:53
2016-08-08T16:49:53
65,221,159
0
0
null
null
null
null
UTF-8
Python
false
false
345
py
from gPhoton.gAperture import gAperture


def main():
    """Extract a GALEX NUV light curve for sdB star PG 1536+690."""
    # 30 s time bins; aperture radius and background annulus are in degrees.
    # NOTE(review): 'GPHOTON_OUTPU' in the path looks like a typo for
    # 'GPHOTON_OUTPUT' -- confirm against the actual directory layout
    # before changing it.
    gAperture(band="NUV", skypos=[234.203458,68.869067], stepsz=30.,
              csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1536+690/sdB_pg_1536+690_lc.csv",
              maxgap=1000., overwrite=True, radius=0.00555556,
              annulus=[0.005972227,0.0103888972], verbose=3)


if __name__ == "__main__":
    main()
[ "thomas@boudreauxmail.com" ]
thomas@boudreauxmail.com
ea3a6a6002d4c5c1dd9374b6e70e4f7feb25bf0a
41f39d013ae3cb2b3ca4230c77b9037cc9c894f6
/gym/gym/envs/tests/spec_list.py
b9596408afb70ecdad341bf20530ecc72bf0ef7c
[ "MIT", "LicenseRef-scancode-generic-cla" ]
permissive
sokol1412/rllab_hierarchical_rl
162aec9bb06e271d12333fa072fb44d692c26301
6d46c02e32c3d7e9ac55d753d6a3823ff86c5a57
refs/heads/master
2020-03-07T07:37:39.510301
2018-08-19T11:54:56
2018-08-19T11:54:56
127,353,660
0
0
null
null
null
null
UTF-8
Python
false
false
1,181
py
from gym.gym import envs
import os
import logging
logger = logging.getLogger(__name__)


def should_skip_env_spec_for_tests(spec):
    """Return True when tests for this env spec should be skipped.

    We skip tests for envs that require dependencies or are otherwise
    troublesome to run frequently.
    """
    ep = spec._entry_point
    # Skip mujoco tests for pull request CI: only run them when a key is
    # available or a local mujoco installation exists.
    skip_mujoco = not (os.environ.get('MUJOCO_KEY_BUNDLE')
                       or os.path.exists(os.path.expanduser('~/.mujoco')))
    if skip_mujoco and ep.startswith('gym.envs.mujoco:'):
        return True
    # Fixed: the original listed ep.startswith('gym.envs.box2d:') twice in
    # this condition; the redundant duplicate has been removed.
    if (
            'GoEnv' in ep
            or 'HexEnv' in ep
            or ep.startswith('gym.envs.box2d:')
            or ep.startswith('gym.envs.parameter_tuning:')
            or ep.startswith('gym.envs.safety:Semisuper')
            # Atari envs are slow; only keep a representative pair.
            or (ep.startswith("gym.envs.atari")
                and not spec.id.startswith("Pong")
                and not spec.id.startswith("Seaquest"))
    ):
        logger.warning("Skipping tests for env {}".format(ep))
        return True
    return False


# All registered specs (sorted for determinism) that have an entry point
# and are not excluded above.
spec_list = [spec for spec in sorted(envs.registry.all(), key=lambda x: x.id)
             if spec._entry_point is not None
             and not should_skip_env_spec_for_tests(spec)]
[ "wlasek1412@gmail.com" ]
wlasek1412@gmail.com
e279ab7f4f8a7694db03f67ed2b49a5684138c0b
70bee1e4e770398ae7ad9323bd9ea06f279e2796
/openapi_client/models/types_console_certificate_settings.py
ce30f655952ee5e232890ddeccf76d3bd9a86194
[]
no_license
hi-artem/twistlock-py
c84b420b1e582b3c4cf3631eb72dac6d659d4746
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
refs/heads/main
2023-07-18T07:57:57.705014
2021-08-22T04:36:33
2021-08-22T04:36:33
398,637,698
0
0
null
null
null
null
UTF-8
Python
false
false
6,974
py
# coding: utf-8

"""
    Prisma Cloud Compute API

    No description provided (generated by Openapi Generator
    https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: 21.04.439
    Generated by: https://openapi-generator.tech
"""

# getfullargspec replaced getargspec in Python 3; fall back for Python 2.
try:
    from inspect import getfullargspec
except ImportError:
    from inspect import getargspec as getfullargspec
import pprint
import re  # noqa: F401

import six

from openapi_client.configuration import Configuration


class TypesConsoleCertificateSettings(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared attribute types (OpenAPI type names, may reference other models).
    openapi_types = {
        'check_revocation': 'bool',
        'console_ca_cert': 'str',
        'console_custom_cert': 'CommonSecret',
        'hpkp': 'TypesHPKPSettings'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'check_revocation': 'checkRevocation',
        'console_ca_cert': 'consoleCaCert',
        'console_custom_cert': 'consoleCustomCert',
        'hpkp': 'hpkp'
    }

    def __init__(self, check_revocation=None, console_ca_cert=None, console_custom_cert=None, hpkp=None, local_vars_configuration=None):  # noqa: E501
        """TypesConsoleCertificateSettings - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._check_revocation = None
        self._console_ca_cert = None
        self._console_custom_cert = None
        self._hpkp = None
        self.discriminator = None

        # Only assign supplied fields so unset attributes stay None.
        if check_revocation is not None:
            self.check_revocation = check_revocation
        if console_ca_cert is not None:
            self.console_ca_cert = console_ca_cert
        if console_custom_cert is not None:
            self.console_custom_cert = console_custom_cert
        if hpkp is not None:
            self.hpkp = hpkp

    @property
    def check_revocation(self):
        """Gets the check_revocation of this TypesConsoleCertificateSettings.  # noqa: E501

        CheckRevocation indicates whether cert revocation status is required.  # noqa: E501

        :return: The check_revocation of this TypesConsoleCertificateSettings.  # noqa: E501
        :rtype: bool
        """
        return self._check_revocation

    @check_revocation.setter
    def check_revocation(self, check_revocation):
        """Sets the check_revocation of this TypesConsoleCertificateSettings.

        CheckRevocation indicates whether cert revocation status is required.  # noqa: E501

        :param check_revocation: The check_revocation of this TypesConsoleCertificateSettings.  # noqa: E501
        :type check_revocation: bool
        """

        self._check_revocation = check_revocation

    @property
    def console_ca_cert(self):
        """Gets the console_ca_cert of this TypesConsoleCertificateSettings.  # noqa: E501

        ConsoleCACert is a custom CA certificate for the console.  # noqa: E501

        :return: The console_ca_cert of this TypesConsoleCertificateSettings.  # noqa: E501
        :rtype: str
        """
        return self._console_ca_cert

    @console_ca_cert.setter
    def console_ca_cert(self, console_ca_cert):
        """Sets the console_ca_cert of this TypesConsoleCertificateSettings.

        ConsoleCACert is a custom CA certificate for the console.  # noqa: E501

        :param console_ca_cert: The console_ca_cert of this TypesConsoleCertificateSettings.  # noqa: E501
        :type console_ca_cert: str
        """

        self._console_ca_cert = console_ca_cert

    @property
    def console_custom_cert(self):
        """Gets the console_custom_cert of this TypesConsoleCertificateSettings.  # noqa: E501


        :return: The console_custom_cert of this TypesConsoleCertificateSettings.  # noqa: E501
        :rtype: CommonSecret
        """
        return self._console_custom_cert

    @console_custom_cert.setter
    def console_custom_cert(self, console_custom_cert):
        """Sets the console_custom_cert of this TypesConsoleCertificateSettings.


        :param console_custom_cert: The console_custom_cert of this TypesConsoleCertificateSettings.  # noqa: E501
        :type console_custom_cert: CommonSecret
        """

        self._console_custom_cert = console_custom_cert

    @property
    def hpkp(self):
        """Gets the hpkp of this TypesConsoleCertificateSettings.  # noqa: E501


        :return: The hpkp of this TypesConsoleCertificateSettings.  # noqa: E501
        :rtype: TypesHPKPSettings
        """
        return self._hpkp

    @hpkp.setter
    def hpkp(self, hpkp):
        """Sets the hpkp of this TypesConsoleCertificateSettings.


        :param hpkp: The hpkp of this TypesConsoleCertificateSettings.  # noqa: E501
        :type hpkp: TypesHPKPSettings
        """

        self._hpkp = hpkp

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models; pass `serialize` through when
        # the nested to_dict accepts it (newer generated models take it).
        def convert(x):
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # When serializing, emit the wire (camelCase) key names.
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TypesConsoleCertificateSettings):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, TypesConsoleCertificateSettings):
            return True

        return self.to_dict() != other.to_dict()
[ "aakatev@virtru.com" ]
aakatev@virtru.com
bf08004a8dc519aed4684436e2e47168caab9220
f3b233e5053e28fa95c549017bd75a30456eb50c
/mcl1_input/L37/37-67_wat_20Abox/set_1ns_equi_m.py
9a30abf2ff76750baf8c5f509e5e70f035965f6b
[]
no_license
AnguseZhang/Input_TI
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
50ada0833890be9e261c967d00948f998313cb60
refs/heads/master
2021-05-25T15:02:38.858785
2020-02-18T16:57:04
2020-02-18T16:57:04
null
0
0
null
null
null
null
UTF-8
Python
false
false
923
py
import os

# Working directory for the 37 -> 67 one-step TI transformation (MCL1 L37,
# solvated in a 20 A water box).
dir = '/mnt/scratch/songlin3/run/mcl1/L37/wat_20Abox/ti_one-step/37_67/'
filesdir = dir + 'files/'
# Template input/PBS files; the literal 'XXX' inside them is replaced with
# the lambda value via sed below.
temp_equiin = filesdir + 'temp_equi_m.in'
temp_pbs = filesdir + 'temp_1ns_equi_m.pbs'

# The 12 lambda windows of the TI protocol; each gets its own subdirectory
# named by the value formatted as %6.5f.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]

for j in lambd:
    # Recreate a clean per-lambda directory (relative to `dir`).
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'

    # equiin: copy the equilibration input and substitute the lambda value.
    eqin = workdir + "%6.5f_equi_m.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))

    # PBS: copy the batch script and substitute the lambda value.
    pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))

    # top: topology and restart coordinates from the parent directory.
    os.system("cp ../37-67_merged.prmtop .")
    os.system("cp ../0.5_equi_0_3.rst .")

    # submit pbs
    os.system("qsub %s" %(pbs))
    # Return to the base directory; the relative rm/mkdir/chdir at the top
    # of the loop depend on being back in `dir` each iteration.
    os.chdir(dir)
[ "songlin3@msu.edu" ]
songlin3@msu.edu
1055636efad751d50373b46a2e7170cabd726762
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_124/121.py
1ee579e02a9298b03bc8eb97a5888b8339bc66a9
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,380
py
# Python 2 Code Jam solution (uses raw_input and print statements).
import math


def test(rank, left, right, n, x, y):
    # Probability that cell (x, y) on diamond ring `rank` is filled after
    # dropping `n` more balls, given `left`/`right` counts already placed
    # on the two sides of the ring.  Each remaining ball goes left or
    # right with probability 0.5 while that side still has room
    # (a side is full at rank * 2 balls).
    if n == 0:
        # No balls left: the cell is filled iff its side's stack has
        # already grown past height y.
        if x > 0:
            if right > y:
                return 1.0
            else:
                return 0.0
        else:
            if left > y:
                return 1.0
            else:
                return 0.0
    # left case
    leftres = 0
    if left < rank * 2:
        leftres = test(rank, left + 1, right, n - 1, x, y)
    # right case
    rightres = 0
    if right < rank * 2:
        rightres = test(rank, left, right + 1, n - 1, x, y)
    # Combine: average when both sides can accept a ball, otherwise the
    # ball is forced to the only open side.
    if left < rank * 2:
        if right < rank * 2:
            return leftres * 0.5 + rightres * 0.5
        else:
            return leftres
    else:
        return rightres


t = int(raw_input())
for casenum in range(1, t + 1):
    n, x, y = [int(z) for z in raw_input().split()]
    # The origin cell is filled by the first ball with certainty.
    if (x == 0) and (y == 0):
        print "Case #%d: 1.0" % casenum
        continue
    # Diamond ring index of (x, y); integer division is intended here.
    rank = (abs(x) + y) / 2
    # maxn: balls after which the whole ring is certainly full.
    maxn = (2 * rank * rank) + (3 * rank ) + 1
    # minn: balls needed before this ring can start filling at all.
    minn = (2 * rank - 1) * rank
    if n >= maxn:
        print "Case #%d: 1.0" % casenum
        continue
    elif y == rank * 2:
        # The topmost cell of a ring can never be filled early.
        print "Case #%d: 0.0" % casenum
        continue
    elif n <= minn:
        print "Case #%d: 0.0" % casenum
    else:
        # Recurse over the n - minn balls that actually land on this ring.
        print "Case #%d: %F" % (casenum, test(rank, 0, 0, n - minn, x, y))
[ "miliar1732@gmail.com" ]
miliar1732@gmail.com
73e9576f7c22061ccd62f5111e442fa156109f2d
38cacbe9ec2f8ea4540f5aed31da60ac3595c08b
/tests/utils.py
b2b5b9aa0b054a7399086693022be53f9da50f24
[ "Apache-2.0" ]
permissive
naure/YaP
14bac663cdf31bda58dd5288f1f297ffa164a742
e4f9c8b00a463b4fedceb6d9241dd9c723607562
refs/heads/master
2020-04-06T06:56:17.027479
2016-08-23T19:34:15
2016-08-23T19:34:15
24,347,882
6
1
null
null
null
null
UTF-8
Python
false
false
968
py
import difflib


def red(s):
    """Wrap *s* in ANSI escape codes so terminals render it red."""
    return '\033[91m' + s + '\033[0m'


def green(s):
    """Wrap *s* in ANSI escape codes so terminals render it green."""
    return '\033[92m' + s + '\033[0m'


def color_diffline(line):
    """Colorize one unified-diff line: removals red, additions green."""
    if line.startswith('-'):
        return red(line)
    if line.startswith('+'):
        return green(line)
    return line


def diff(a, b, **kwargs):
    """Return a colorized unified diff of two strings (newline-joined)."""
    raw_lines = difflib.unified_diff(a.splitlines(), b.splitlines(), **kwargs)
    return '\n'.join(color_diffline(raw) for raw in raw_lines)


def diff_paths(pa, pb):
    """Diff two files on disk.

    Returns the colorized diff text when they differ, or False when the
    contents are identical.
    """
    with open(pa) as fa, open(pb) as fb:
        contents_a = fa.read()
        contents_b = fb.read()
    if contents_a == contents_b:
        return False
    return diff(contents_a, contents_b, fromfile=pa, tofile=pb)


def compare_paths(ref_path, test_path, what='Output'):
    """Compare a produced file against a reference file.

    Prints a colored diff and returns 1 when they differ; returns 0 when
    the files match.
    """
    mismatch = diff_paths(ref_path, test_path)
    if not mismatch:
        return 0
    print(red('{} {} is different than reference {}'.format(
        what, test_path, ref_path)))
    print(mismatch)
    return 1
[ "devnull@localhost" ]
devnull@localhost
60112dc2b60f73bf2d1353313923ac3433aa5be3
30cffb7452220c2ac2961dd2e0f42e3b359a59c0
/simscale_sdk/models/ground_relative.py
9f70ec5cb7f1b769bb77f048419ec92fffa6e3f3
[ "MIT" ]
permissive
vpurcarea/simscale-python-sdk
0bf892d8824f8d4599caa0f345d5ba28e038f5eb
6f2d12b2d21142bd854042c0fb402c2c797629e4
refs/heads/master
2023-03-14T04:31:06.226337
2021-03-03T16:20:01
2021-03-03T16:20:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,290
py
# coding: utf-8 """ SimScale API The version of the OpenAPI document: 0.0.0 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from simscale_sdk.configuration import Configuration class GroundRelative(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'type': 'str', 'topological_reference': 'TopologicalReference' } attribute_map = { 'type': 'type', 'topological_reference': 'topologicalReference' } def __init__(self, type='GROUND_RELATIVE', topological_reference=None, local_vars_configuration=None): # noqa: E501 """GroundRelative - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._type = None self._topological_reference = None self.discriminator = None self.type = type if topological_reference is not None: self.topological_reference = topological_reference @property def type(self): """Gets the type of this GroundRelative. # noqa: E501 :return: The type of this GroundRelative. # noqa: E501 :rtype: str """ return self._type @type.setter def type(self, type): """Sets the type of this GroundRelative. :param type: The type of this GroundRelative. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501 raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501 self._type = type @property def topological_reference(self): """Gets the topological_reference of this GroundRelative. # noqa: E501 :return: The topological_reference of this GroundRelative. 
# noqa: E501 :rtype: TopologicalReference """ return self._topological_reference @topological_reference.setter def topological_reference(self, topological_reference): """Sets the topological_reference of this GroundRelative. :param topological_reference: The topological_reference of this GroundRelative. # noqa: E501 :type: TopologicalReference """ self._topological_reference = topological_reference def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, GroundRelative): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, GroundRelative): return True return self.to_dict() != other.to_dict()
[ "simscale" ]
simscale
6e14b31178f64e5288bfaa4ef92323615e3ab96c
ed6cb6f58b36f16d38e0f7909173c67c15575a3f
/smart_compress/models/base.py
a381b2ad5ac4abdb3dfa1faf51075fcd05ebbe9f
[]
no_license
gthparch/etri-quant
4698e50ed1f835ced5b69f928cda5bc2e357657f
36cfb72f50937b65762fac0f12f044714b755a66
refs/heads/main
2023-08-14T21:42:10.007259
2021-10-09T01:28:40
2021-10-09T01:28:40
415,169,674
0
0
null
null
null
null
UTF-8
Python
false
false
5,904
py
from abc import abstractmethod
from argparse import ArgumentParser, Namespace
from typing import Iterator, Type, Union

import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from argparse_utils.mapping import mapping_action
from smart_compress.util.pytorch.hooks import wrap_optimizer
from torch import nn
from torch.optim import SGD, Adam, AdamW
from torch.optim.lr_scheduler import MultiStepLR
from torch.optim.optimizer import Optimizer


def make_optimizer_args(
    hparams: Namespace,
    **kwargs,
):
    """Build the keyword-argument dict for an optimizer from hyperparameters.

    Always includes lr/momentum/weight_decay; betas and eps are added only
    when set (note: falsy values like 0 are treated as unset here).
    Extra ``kwargs`` override anything derived from ``hparams``.
    """
    optimizer_args = dict(
        lr=hparams.learning_rate,
        momentum=hparams.momentum,
        weight_decay=hparams.weight_decay,
    )
    if hparams.beta1 and hparams.beta2:
        optimizer_args.update(dict(betas=(hparams.beta1, hparams.beta2)))
    if hparams.epsilon:
        optimizer_args.update(dict(eps=hparams.epsilon))
    optimizer_args.update(kwargs)

    return optimizer_args


def make_multistep_scheduler(optimizer: Optimizer, hparams: Namespace):
    """Create a MultiStepLR scheduler using milestones/gamma from hparams."""
    return MultiStepLR(
        optimizer,
        milestones=hparams.scheduler_milestones,
        gamma=hparams.scheduler_gamma,
    )


class BaseModule(pl.LightningModule):
    """Base LightningModule wiring optimizer/scheduler CLI flags and
    optional tensor compression into the training loop."""

    @staticmethod
    def add_argparse_args(parent_parser):
        """Register optimizer/scheduler hyperparameter flags on a parser."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument(
            "--optimizer_type",
            action=mapping_action(dict(adam=Adam, adamw=AdamW, sgd=SGD)),
            default="sgd",
            dest="optimizer_cls",
        )
        # NOTE(review): the trailing comma after this call creates a throwaway
        # tuple; harmless, kept byte-identical.
        parser.add_argument(
            "--scheduler_type",
            action=mapping_action(dict(multi_step=make_multistep_scheduler)),
            dest="make_scheduler_fn",
        ),
        parser.add_argument("--scheduler_gamma", type=float, default=0.1)
        parser.add_argument(
            "--scheduler_milestones",
            type=int,
            nargs="+",
            default=[100, 150, 200],
        )
        parser.add_argument("--learning_rate", type=float, default=0.1)
        parser.add_argument("--weight_decay", type=float, default=0)
        parser.add_argument("--momentum", type=float, default=0.9)
        parser.add_argument("--beta1", type=float)
        parser.add_argument("--beta2", type=float)
        parser.add_argument("--epsilon", type=float)
        parser.add_argument("--measure_average_grad_norm", action="store_true")

        return parser

    def __init__(self, *args, compression=None, **kwargs):
        super().__init__()

        # Default to the no-op FP32 "compression" when none is supplied.
        self.compression = compression
        if self.compression is None:
            from smart_compress.compress.fp32 import FP32

            self.compression = FP32(self.hparams)

        self.save_hyperparameters()

        if self.hparams.measure_average_grad_norm:
            # Accumulates per-step mean gradient norms (see optimizer_zero_grad).
            self._grads = []

    def training_epoch_end(self, *args, **kwargs):
        if not self.hparams.measure_average_grad_norm:
            return super().training_epoch_end(*args, **kwargs)

        # Best-effort diagnostic print; deliberately swallows any error so a
        # logging failure never aborts training.
        try:
            avg = torch.mean(torch.tensor(self._grads))
            print(f"AVERAGE: {avg}")
        except:
            pass

        return super().training_epoch_end(*args, **kwargs)

    def loss_function(self, outputs, ground_truth):
        """Default loss: cross entropy. Subclasses may override."""
        return F.cross_entropy(outputs, ground_truth)

    def accuracy_function(self, outputs, ground_truth):
        """Extra metrics to log per step; default none. Subclasses override."""
        return dict()

    @abstractmethod
    def forward(self, x):
        raise Exception("Not implemented")

    def calculate_loss(self, batch):
        """Run the model on a (inputs, labels) batch and compute the loss."""
        inputs, labels = batch
        outputs = self(inputs)
        loss = self.loss_function(outputs, labels)
        if self.hparams.compress_loss:
            # Compress the loss value in place (only its .data is replaced).
            loss.data = self.compression(loss.data, tag="loss")
        return labels, loss, outputs

    def training_step(self, batch, _batch_idx):
        labels, loss, outputs = self.calculate_loss(batch)

        self.log("train_loss", loss)
        for metric, value in self.accuracy_function(outputs, labels).items():
            self.log(f"train_{metric}", value, on_epoch=True, prog_bar=True)

        return dict(loss=loss)

    def validation_step(self, batch, _batch_idx):
        labels, loss, outputs = self.calculate_loss(batch)

        self.log("val_loss", loss)
        for metric, value in self.accuracy_function(outputs, labels).items():
            self.log(f"val_{metric}", value, on_epoch=True, prog_bar=True)

        return dict(loss=loss)

    def configure_optimizers(self):
        base_args = make_optimizer_args(self.hparams)

        # Split parameters so BatchNorm params can opt out of weight
        # compression (flag consumed by wrap_optimizer below).
        params_bn = []
        params_no_bn = []
        for child in self.modules():
            params = params_bn if type(child) == nn.BatchNorm2d else params_no_bn
            params.extend(child.parameters(recurse=False))

        optimizer = self.hparams.optimizer_cls(
            [
                dict(params=params_bn, no_weight_compression=True, **base_args),
                dict(params=params_no_bn, **base_args),
            ]
        )

        if (
            self.hparams.compress_weights
            or self.hparams.compress_gradients
            or self.hparams.compress_momentum_vectors
        ):
            optimizer = wrap_optimizer(optimizer, self.compression, self.hparams)

        if self.hparams.make_scheduler_fn:
            scheduler = self.hparams.make_scheduler_fn(optimizer, self.hparams)
            return [optimizer], [scheduler]

        return [optimizer], []

    def optimizer_zero_grad(self, *args, **kwargs):
        if not self.hparams.measure_average_grad_norm:
            return super().optimizer_zero_grad(*args, **kwargs)

        # Record the mean gradient norm just before grads are cleared.
        norms = torch.tensor(
            [
                parameter.grad.norm()
                for parameter in self.parameters()
                if parameter.grad is not None
            ]
        )
        if len(norms):
            self._grads.append(torch.mean(norms))

        return super().optimizer_zero_grad(*args, **kwargs)
[ "caojiashen24@gmail.com" ]
caojiashen24@gmail.com
4e7a822623cae02b8a770d73d3e214da5a0056c0
d8edd97f8f8dea3f9f02da6c40d331682bb43113
/networks1216.py
05847acaccb6e806629fb44d584e05d7924634a3
[]
no_license
mdubouch/noise-gan
bdd5b2fff3aff70d5f464150443d51c2192eeafd
639859ec4a2aa809d17eb6998a5a7d217559888a
refs/heads/master
2023-07-15T09:37:57.631656
2021-08-27T11:02:45
2021-08-27T11:02:45
284,072,311
0
0
null
null
null
null
UTF-8
Python
false
false
7,579
py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

__version__ = 205

# Number of continuous features (E, t, dca)
n_features = 3
geom_dim = 3


class SelfAttn(nn.Module):
    """ Self attention Layer"""
    def __init__(self, dim):
        super().__init__()
        # 1x1 convolutions producing query/key/value, all at width `dim`.
        self.query_conv = nn.Conv1d(dim, dim, 1, 1, 0)
        self.key_conv = nn.Conv1d(dim, dim, 1, 1, 0)
        self.value_conv = nn.Conv1d(dim, dim, 1, 1, 0)

    def forward(self,x):
        """
            inputs :
                x : input feature maps (BxCxL)
            returns :
                out : self attention value + input feature
                attention: B x L x L
        """
        Q = self.query_conv(x).permute(0,2,1) # (B, L, C)
        K = self.key_conv(x) # (B, C, L)
        attn = torch.softmax(torch.bmm(Q, K), dim=-1) # (B, L, L)
        V = self.value_conv(x) # (B, C, L)
        out = torch.bmm(V, attn) # (B, C, L)
        # Residual connection around the attention output.
        out = out + x
        return out, attn


def wire_hook(grad):
    # Debug gradient hook: print mean-abs and std of the wire gradient.
    print('wg %.2e %.2e' % (grad.abs().mean().item(), grad.std().item()))
    return grad


class Gen(nn.Module):
    """Generator: maps a latent vector to (features, wire-assignment) sequences."""
    def __init__(self, ngf, latent_dims, seq_len, encoded_dim, n_wires):
        super().__init__()

        self.ngf = ngf
        self.seq_len = seq_len

        self.version = __version__

        # Input: (B, latent_dims, 1)
        self.act = nn.ReLU()

        # Despite the names, all nXXX widths derive from n512 = 256.
        n512 = 256
        self.lin0 = nn.Linear(latent_dims, seq_len//32*n512, bias=True)
        self.n512 = n512
        n256 = n512 // 2
        n128 = n512 // 4
        n64 = n512 // 8
        n32 = n512 // 16
        n16 = n512 // 32

        class ResBlockUp(nn.Module):
            """Residual block that doubles sequence length (transposed convs)."""
            def __init__(self, in_c, out_c):
                super().__init__()
                self.conv1 = nn.ConvTranspose1d(in_c, out_c, 3, 2, 1, output_padding=1)
                self.conv2 = nn.ConvTranspose1d(out_c, out_c, 3, 1, 1)
                # Projection shortcut matching the upsampled shape.
                self.convp = nn.ConvTranspose1d(in_c, out_c, 2, 2, 0, bias=False)
                self.bn1 = nn.InstanceNorm1d(out_c)
                self.bn2 = nn.InstanceNorm1d(out_c)
                self.act = nn.ReLU()
            def forward(self, x):
                y = self.bn1(self.act(self.conv1(x)))
                y = self.conv2(y)
                xp = self.convp(x)
                y = self.bn2(self.act(xp + y))
                return y

        # Shared trunk: upsample seq_len//32 -> seq_len//4.
        self.convu1 = ResBlockUp(n512, n512)
        self.convu2 = ResBlockUp(n512, n512)
        self.convu3 = ResBlockUp(n512, n512//2)

        # Common branch
        self.convu4 = ResBlockUp(n512//2, n512//4)
        self.convu5 = ResBlockUp(n512//4, n512//8)

        # W branch
        self.convuw1 = ResBlockUp(n512//2, n512//4)
        self.convuw2 = ResBlockUp(n512//4, n512//8)

        # P branch
        self.convup1 = ResBlockUp(n512//2, n512//4)
        self.convup2 = ResBlockUp(n512//4, n512//8)

        # Wire head: attention + convs down to one logit per wire.
        self.attnw2 = SelfAttn(n512//4)
        self.convw2 = nn.Conv1d(n512//4, n512//8, 7, 1, 3)
        self.attnw1 = SelfAttn(n512//8)
        self.convw1 = nn.Conv1d(n512//8, n_wires, 1, 1, 0)

        # Feature head: attention + convs down to n_features channels.
        self.attnp2 = SelfAttn(n512//4)
        self.convp2 = nn.Conv1d(n512//4, n512//8, 3, 1, 1)
        self.attnp1 = SelfAttn(n512//8)
        self.convp1 = nn.Conv1d(n512//8, n_features, 7, 1, 3)

    def forward(self, z):
        #print('latent space %.2e %.2e' % (z.mean().item(), z.std().item()))
        # z: random point in latent space
        x = self.act(self.lin0(z).reshape(-1, self.n512, self.seq_len // 32))

        x = self.convu1(x)
        x = self.convu2(x)
        x0 = self.convu3(x)

        # Common
        x = self.convu4(x0)
        x = self.convu5(x)

        # W
        w = self.convuw1(x0)
        w = self.convuw2(w)

        # P
        p = self.convup1(x0)
        p = self.convup2(p)

        # Wire head sees common + wire features concatenated.
        w0 = torch.cat([x, w], dim=1)
        w, w_attn = self.attnw2(w0)
        w = self.act(self.convw2(w))
        w, w_attn = self.attnw1(w)
        w = self.convw1(w)
        # Soft one-hot over wires at each sequence position.
        wg = torch.softmax(w, dim=1)

        # Feature head sees common + feature branch concatenated.
        p0 = torch.cat([x, p], dim=1)
        p, p_attn = self.attnp2(p0)
        p = self.act(self.convp2(p))
        p, p_attn = self.attnp1(p)
        p = self.convp1(p)

        #return torch.cat([self.out(p), xy], dim=1), wg
        return torch.tanh(p), wg


def xy_hook(grad):
    # Debug gradient hook: print mean-abs and std of the geometry gradient.
    print('xy %.2e %.2e' % (grad.abs().mean().item(), grad.std().item()))
    return grad


class Disc(nn.Module):
    """Discriminator: scores (features, geometry, wire-assignment) sequences."""
    def __init__(self, ndf, seq_len, encoded_dim, n_wires):
        super().__init__()

        self.version = __version__

        # (B, n_features, 256)
        self.act = nn.LeakyReLU(0.2)

        n768 = 256
        n512 = 256
        n256 = 256
        n128 = 128
        n64 = 64
        nproj = 8

        class ResBlock(nn.Module):
            """Width-preserving residual block (circular padding)."""
            def __init__(self, channels):
                super().__init__()
                self.conv1 = nn.Conv1d(channels, channels, 3, 1, 1, padding_mode='circular')
                self.conv2 = nn.Conv1d(channels, channels, 3, 1, 1, padding_mode='circular')
                self.act = nn.LeakyReLU(0.2)
            def forward(self, x):
                y = self.act(self.conv1(x))
                y = self.conv2(y)
                y = self.act(y + x)
                return y

        class ResBlockDown(nn.Module):
            """Residual block halving sequence length, doubling channels."""
            def __init__(self, channels):
                super().__init__()
                self.conv1 = nn.Conv1d(channels, channels*2, 3, 2, 1, padding_mode='circular')
                self.conv2 = nn.Conv1d(channels*2, channels*2, 3, 1, 1, padding_mode='circular')
                # Strided projection shortcut to match the downsampled shape.
                self.convp = nn.Conv1d(channels, channels*2, 2, 2, 0, bias=False)
                self.act = nn.LeakyReLU(0.2)
            def forward(self, x):
                y = self.act(self.conv1(x))
                y = self.conv2(y)
                xp = self.convp(x)
                y = self.act(y + xp)
                return y

        # Project the n_wires one-hot/softmax channels down to nproj.
        self.convw0 = nn.Conv1d(n_wires, nproj, 1, 1, 0, bias=False)

        self.conv1 = nn.Conv1d(nproj+geom_dim+n_features, n64, 17, 1, 8, padding_mode='circular')

        self.attn1 = SelfAttn(n64)
        self.rb2 = ResBlockDown(n64)
        self.attn2 = SelfAttn(n64*2)
        self.rb3 = ResBlockDown(n64*2)
        self.attn3 = SelfAttn(n64*4)
        self.rb4 = ResBlockDown(n64*4)

        self.lin0 = nn.Linear(n64*8, 1)

        # NOTE(review): dropout/out/padleft are constructed but not used in
        # forward() below -- confirm whether they are vestigial.
        self.dropout = nn.Dropout(0.2)
        self.out = nn.Identity()

        self.padleft = nn.ConstantPad1d((1, 0), 0.)

    def forward(self, x_):
        # x_ is concatenated tensor of p_ and w_, shape (batch, features+n_wires, seq_len)
        # p_ shape is (batch, features, seq_len),
        # w_ is AE-encoded wire (batch, encoded_dim, seq_len)
        seq_len = x_.shape[2]

        #dist = ((xy - nn.ConstantPad1d((1, 0), 0.0)(xy[:,:,:-1]))**2).sum(dim=1).unsqueeze(1)

        # Split the packed input into features / geometry / wire channels.
        p = x_[:,:n_features]
        xy = x_[:,n_features:n_features+geom_dim]
        wg = x_[:,n_features+geom_dim:]

        w0 = self.convw0(wg)

        x = torch.cat([w0, p, xy], dim=1)

        x = self.act(self.conv1(x))

        x, x_attn = self.attn1(x)
        x = self.rb2(x)
        x, x_attn = self.attn2(x)
        x = self.rb3(x)
        x, x_attn = self.attn3(x)
        x = self.rb4(x)

        # Global average pool over the sequence, then a scalar score.
        x = self.lin0(x.mean(2)).squeeze()

        return self.out(x)


def get_n_params(model):
    # Total number of scalar parameters in the model.
    return sum(p.reshape(-1).shape[0] for p in model.parameters())
[ "m.dubouchet18@imperial.ac.uk" ]
m.dubouchet18@imperial.ac.uk
bc0fa8f250c68c7f999971227b923c04b645ab61
4768e4ad67416e8b93344ccf647954398fd69561
/microblog/db_helpers.py
3a122e3cead9c5f0c55ae35f8a93a6433a264d6e
[]
no_license
cleartext/Enterprise-Microblogging-Server
ee0624c004faa8c6ade282a28949fe9a17438e89
befff4b9ec64b395a9ef55b52ed563a4168bc07f
refs/heads/master
2016-09-06T16:23:39.454558
2011-01-25T21:56:46
2011-01-25T21:56:46
1,288,330
3
2
null
null
null
null
UTF-8
Python
false
false
678
py
""" Different database helpers, to retrive information about users. """ from microblog.models import User from microblog.exceptions import UserNotFound def get_user_by_jid(jid, session): jid = jid.split('/', 1)[0] user = session.query(User).filter(User.jid==jid).scalar() if user is None: raise UserNotFound('User with jid "%s" not found.' % jid) return user def get_all_users(session): return session.query(User) def get_user_by_username(username, session): user = session.query(User).filter(User.username==username).scalar() if user is None: raise UserNotFound('User with username "%s" not found.' % username) return user
[ "svetlyak.40wt@gmail.com" ]
svetlyak.40wt@gmail.com
a4de822d452f9b30465f5be889f1f3b10fb5bd39
6c10c6e229014dc3bf14efaec2ea8bf07c406752
/AILearning/ComputerVision/ImageClassification.py
5ddaebaa3e190d8957c87cd97df64729df342429
[]
no_license
GuyRobot/AIPythonExamples
e59c6edb355d9cadee2b3f19a087b1b656956262
4acdd0d4966e31a616910554bc075b641aa152df
refs/heads/master
2021-05-21T13:05:49.615593
2021-02-28T06:41:04
2021-02-28T06:41:04
252,662,467
0
0
null
null
null
null
UTF-8
Python
false
false
7,674
py
import collections
from d2l import AllDeepLearning as d2l
from mxnet import gluon, init, nd, autograd
from mxnet.gluon import nn
import os
import pandas as pd
import shutil
import time
import math
import tarfile
from pathlib import Path

# Root of the Kaggle CIFAR-10 data; point `data_dir` at the tiny subset
# (and set demo = True) for a quick smoke run.
data_dir = "E:/Python_Data/cifar-10/"
# tiny_data_dir = "E:/Python_Data/kaggle_cifar10_tiny/"
# data_dir = tiny_data_dir
tiny_data_dir = data_dir
a = "http://d2l-data.s3-accelerate.amazonaws.com/kaggle_cifar10_tiny.zip"
demo = False


def read_csv_labels(fname):
    """Read the Kaggle trainLabels.csv into an {image_name: label} dict.

    The first line of the file is the CSV header and is skipped.
    """
    with open(fname) as f:
        lines = f.readlines()[1:]
    tokens = [l.rstrip().split(',') for l in lines]
    return dict(((name, label) for name, label in tokens))


def copyfile(filename, target_dir):
    """Copy `filename` into `target_dir`, creating the directory if needed."""
    Path("%s" % target_dir).mkdir(parents=True, exist_ok=True)
    shutil.copy(filename, target_dir)


def reorg_train_valid(data_dir, labels, valid_ratio):
    """Split the raw `train` folder into train / valid / train_valid folders.

    Per label, `valid_ratio` of the rarest class's example count is held
    out for validation (at least 1). Returns that per-label hold-out count.
    """
    # n = example count of the least-represented class
    n = collections.Counter(labels.values()).most_common()[-1][1]
    n_valid_per_label = max(1, math.floor(n * valid_ratio))
    label_count = {}
    for train_file in os.listdir(data_dir + 'train'):
        label = labels[train_file.split('.')[0]]
        fname = data_dir + 'train/' + train_file
        # Every example also goes into train_valid (final full-data training)
        copyfile(fname, data_dir + 'train_valid_test/train_valid/' + label)
        if label not in label_count or label_count[label] < n_valid_per_label:
            copyfile(fname, data_dir + 'train_valid_test/valid/' + label)
            label_count[label] = label_count.get(label, 0) + 1
        else:
            copyfile(fname, data_dir + 'train_valid_test/train/' + label)
    return n_valid_per_label


def reorg_test(data_dir):
    """Copy the unlabeled test images under a single 'unknown' class folder."""
    for test_file in os.listdir(data_dir + 'test'):
        copyfile(data_dir + 'test/' + test_file,
                 data_dir + 'train_valid_test/test/unknown/')


def reorg_cifar10_data(data_dir, valid_ratio):
    """Reorganize the Kaggle layout into ImageFolderDataset class folders."""
    labels = read_csv_labels(data_dir + 'trainLabels.csv')
    reorg_train_valid(data_dir, labels, valid_ratio)
    reorg_test(data_dir)


batch_size = 1 if demo else 128
valid_ratio = 0.1
reorg_cifar10_data(tiny_data_dir, valid_ratio)

transform_train = gluon.data.vision.transforms.Compose([
    gluon.data.vision.transforms.Resize(40),
    # BUGFIX: scale was (0.64, 0.1) — RandomResizedCrop expects an
    # (min, max) area fraction with min <= max; the upper bound is 1.0.
    gluon.data.vision.transforms.RandomResizedCrop(32, scale=(0.64, 1.0),
                                                   ratio=(1.0, 1.0)),
    gluon.data.vision.transforms.RandomFlipLeftRight(),
    gluon.data.vision.transforms.ToTensor(),
    # Per-channel CIFAR-10 mean / std
    gluon.data.vision.transforms.Normalize((0.4914, 0.4822, 0.4465),
                                           (0.2023, 0.1994, 0.2010))])

transform_test = gluon.data.vision.transforms.Compose([
    gluon.data.vision.transforms.ToTensor(),
    gluon.data.vision.transforms.Normalize((0.4914, 0.4822, 0.4465),
                                           (0.2023, 0.1994, 0.2010))
])

train_ds, valid_ds, train_valid_ds, test_ds = [
    gluon.data.vision.ImageFolderDataset(data_dir + 'train_valid_test/' + folder)
    for folder in ['train', 'valid', 'train_valid', 'test']]

train_iter, train_valid_iter = [gluon.data.DataLoader(
    dataset.transform_first(transform_train), batch_size, shuffle=True,
    last_batch='keep') for dataset in (train_ds, train_valid_ds)]

valid_iter, test_iter = [gluon.data.DataLoader(
    dataset.transform_first(transform_test), batch_size, shuffle=False,
    last_batch='keep') for dataset in [valid_ds, test_ds]]


class Residual(nn.HybridBlock):
    """ResNet residual block: two 3x3 convs plus an optional 1x1 shortcut."""

    def __init__(self, num_channels, use_1x1_conv=False, strides=1, **kwargs):
        super(Residual, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
                               strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        if use_1x1_conv:
            # 1x1 conv matches channels / stride on the shortcut path
            self.conv3 = nn.Conv2D(num_channels, kernel_size=1,
                                   strides=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()

    def hybrid_forward(self, F, X, *args, **kwargs):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        return F.relu(Y + X)


def resnet18(num_classes):
    """Build a ResNet-18 adapted to 32x32 inputs (3x3 stem, no max-pool)."""
    net = nn.HybridSequential()
    net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
            nn.BatchNorm(), nn.Activation('relu'))

    def resnet_block(num_channels, num_residuals, first_block=False):
        # First residual of every later stage halves resolution (stride 2)
        blk = nn.HybridSequential()
        for i in range(num_residuals):
            if i == 0 and not first_block:
                blk.add(Residual(num_channels, use_1x1_conv=True, strides=2))
            else:
                blk.add(Residual(num_channels))
        return blk

    net.add(resnet_block(64, 2, first_block=True),
            resnet_block(128, 2),
            resnet_block(256, 2),
            resnet_block(512, 2))
    net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
    return net


def get_net(ctx):
    """Create a fresh ResNet-18 (10 classes) with Xavier init on `ctx`."""
    num_classes = 10
    net = resnet18(num_classes)
    net.initialize(init.Xavier(), ctx=ctx)
    return net


def train(net, train_iter, valid_iter, num_epochs, lr, wd, ctx, lr_period,
          lr_decay):
    """Train with SGD + momentum, decaying LR by `lr_decay` every `lr_period`.

    When `valid_iter` is None (final full-data training) validation is
    skipped and only the training metrics are printed.
    """
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd',
                            {'learning_rate': lr, "momentum": 0.9, 'wd': wd})
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        if epoch > 0 and epoch % lr_period == 0:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay)
        for X, y in train_iter:
            y = y.astype('float32').as_in_context(ctx)
            with autograd.record():
                y_hat = net(X.as_in_context(ctx))
                l = loss(y_hat, y)
            l.backward()
            trainer.step(batch_size)
            train_l_sum += float(l.sum().asscalar())
            train_acc_sum += float((y_hat.argmax(axis=1) == y).sum().asscalar())
            n += y.size
        time_s = "time %.2f sec" % (time.time() - start)
        if valid_iter is not None:
            valid_acc = d2l.evaluate_accuracy_gpu(net, valid_iter, ctx=ctx)
            epoch_s = ("epoch %d, loss %.2f, train acc %f, valid acc %f, "
                       % (epoch + 1, train_l_sum / n, train_acc_sum / n,
                          valid_acc))
        else:
            epoch_s = ("epoch %d, loss %f, train acc %f"
                       % (epoch + 1, train_l_sum / n, train_acc_sum / n))
        print(epoch_s + time_s + ', lr ' + str(trainer.learning_rate))


ctx, num_epochs, lr, wd = d2l.try_gpu(), 100, 0.1, 5e-4
lr_period, lr_decay, net = 80, 0.1, get_net(ctx)
net.hybridize()
train(net, train_iter, valid_iter, num_epochs, lr, wd, ctx, lr_period, lr_decay)

# Retrain on train+valid for the final model, then (optionally) predict.
net, preds = get_net(ctx), []
net.hybridize()
train(net, train_valid_iter, None, num_epochs, lr, wd, ctx, lr_period, lr_decay)

# for X, _ in test_iter:
#     y_hat = net(X.as_in_context(ctx))
#     preds.extend(y_hat.argmax(axis=1).astype(int).asnumpy())
# sorted_ids = list(range(1, len(test_ds) + 1))
# sorted_ids.sort(key=lambda x: str(x))
# df = pd.DataFrame({'id': sorted_ids, 'label': preds})
# df['label'] = df['label'].apply(lambda x: train_valid_ds.synsets[x])
# df.to_csv('submission.csv', index=False)
[ "bluexker@gmail.com" ]
bluexker@gmail.com
0ff320ce0727df5f0904b4620d4c6d0545a34571
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/otherforms/_mirroring.py
a70d8db0f6d067749196aa854f55063eb436422f
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
226
py
#calss header class _MIRRORING(): def __init__(self,): self.name = "MIRRORING" self.definitions = mirror self.parents = [] self.childen = [] self.properties = [] self.jsondata = {} self.basic = ['mirror']
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
b4c1978dceeb02f1e9f67909e5ca91c0b929cef9
b007d88e6726452ffa8fe80300614f311ae5b318
/educative.io/coding_patterns/two_pointers/triplet_sum_to_zero.py
cf9fb664ace0958b7075ee5471e82813d78f042d
[]
no_license
jinurajan/Datastructures
ec332b12b8395f42cb769e771da3642f25ba7e7f
647fea5d2c8122468a1c018c6829b1c08717d86a
refs/heads/master
2023-07-06T14:42:55.168795
2023-07-04T13:23:22
2023-07-04T13:23:22
76,943,162
0
0
null
null
null
null
UTF-8
Python
false
false
1,271
py
""" Given an array of unsorted numbers, find all unique triplets in it that add up to zero. Example 1: Input: [-3, 0, 1, 2, -1, 1, -2] Output: [-3, 1, 2], [-2, 0, 2], [-2, 1, 1], [-1, 0, 1] Explanation: There are four unique triplets whose sum is equal to zero. Example 2: Input: [-5, 2, -1, -2, 3] Output: [[-5, 2, 3], [-2, -1, 3]] Explanation: There are two unique triplets whose sum is equal to zero. """ def search_triplets(arr): triplets = [] arr.sort() n = len(arr) def two_sum(target, left, triplets): right = len(arr) - 1 while left < right: curr = arr[left] + arr[right] if curr == target: triplets.append([-target, arr[left], arr[right]]) left += 1 right -= 1 while left < right and arr[left] == arr[left - 1]: left += 1 while right >= 0 and arr[right] == arr[right + 1]: right -= 1 elif target > curr: left += 1 # we need a pair with a bigger sum else: right -= 1 for i in range(n - 3): if i > 0 and arr[i] == arr[i - 1]: continue two_sum(-arr[i], i + 1, triplets) return triplets
[ "jinu.p.r@gmail.com" ]
jinu.p.r@gmail.com
8f3d58a6187c5c0b08864b3b0efa195d47915b34
dabf4121ac793c2cfe87ff525a8a0f7305ea2c59
/plugins/maya/publish/extract_pointcache_abc.py
a086790b49e5224c1d72527853aaeadeeda9c52b
[ "MIT" ]
permissive
Lynn5160/reveries-config
9d91210ebde47a69bb00614f95341a7ce313118f
1928e4d41acc4861ffa3260fa855ca77561285b0
refs/heads/master
2022-11-13T21:13:47.665939
2020-07-02T08:24:21
2020-07-02T08:24:21
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,148
py
import contextlib

import pyblish.api


class ExtractPointCacheABC(pyblish.api.InstancePlugin):
    """Extract a Maya point-cache instance to an Alembic (.abc) file.

    `process` only stages paths and frame range; the actual (expensive)
    export is registered as a delayed callable on the representation data
    ("repr.Alembic._delayRun") and executed later by the publisher.
    """

    order = pyblish.api.ExtractorOrder
    hosts = ["maya"]
    label = "Extract PointCache (abc)"

    families = [
        "reveries.pointcache.abc",
    ]

    def process(self, instance):
        # Local imports: maya/reveries are only importable inside Maya
        from maya import cmds
        from reveries import utils

        staging_dir = utils.stage_dir(dir=instance.data["_sharedStage"])
        filename = "%s.abc" % instance.data["subset"]
        outpath = "%s/%s" % (staging_dir, filename)

        instance.data["repr.Alembic._stage"] = staging_dir
        instance.data["repr.Alembic._hardlinks"] = [filename]
        instance.data["repr.Alembic.entryFileName"] = filename

        if instance.data.get("staticCache"):
            # Static cache: bake only the current frame
            start = cmds.currentTime(query=True)
            end = cmds.currentTime(query=True)
        else:
            context_data = instance.context.data
            start = context_data["startFrame"]
            end = context_data["endFrame"]

        instance.data["startFrame"] = start
        instance.data["endFrame"] = end

        euler_filter = instance.data.get("eulerFilter", False)
        root = instance.data["outCache"]

        instance.data["repr.Alembic._delayRun"] = {
            "func": self.export_alembic,
            "args": [
                root, outpath, start, end, euler_filter
            ],
        }

    def export_alembic(self, root, outpath, start, end, euler_filter):
        """Run the Alembic export, resolving duplicate-named nodes first."""
        from reveries.maya import io, lib, capsule
        from maya import cmds

        with contextlib.nested(
            capsule.no_undo(),
            capsule.no_refresh(),
            capsule.evaluation("off"),
            capsule.maintained_selection(),
        ):
            # Selection may change if there are duplicate named nodes and
            # require instancing them to resolve
            with capsule.delete_after() as delete_bin:

                # (NOTE) We need to check any duplicate named nodes, or
                #        error will raised during Alembic export.
                result = lib.ls_duplicated_name(root)
                duplicated = [n for m in result.values() for n in m]
                if duplicated:
                    self.log.info("Duplicate named nodes found, resolving...")
                    # Duplicate it so we could have a unique named new node
                    unique_named = list()
                    for node in duplicated:
                        new_nodes = cmds.duplicate(node,
                                                   inputConnections=True,
                                                   renameChildren=True)
                        new_nodes = cmds.ls(new_nodes, long=True)
                        unique_named.append(new_nodes[0])
                        # New nodes will be deleted after the export
                        delete_bin.extend(new_nodes)

                    # Replace duplicate named nodes with unique named
                    root = list(set(root) - set(duplicated)) + unique_named

                for node in set(root):
                    # (NOTE) If a descendent is instanced, it will appear only
                    #        once on the list returned.
                    root += cmds.listRelatives(node,
                                               allDescendents=True,
                                               fullPath=True,
                                               noIntermediate=True) or []

                root = list(set(root))
                cmds.select(root, replace=True, noExpand=True)

                def _export_alembic():
                    io.export_alembic(
                        outpath,
                        start,
                        end,
                        selection=True,
                        renderableOnly=True,
                        writeVisibility=True,
                        writeCreases=True,
                        worldSpace=True,
                        eulerFilter=euler_filter,
                        attr=[
                            lib.AVALON_ID_ATTR_LONG,
                        ],
                        attrPrefix=[
                            "ai",  # Write out Arnold attributes
                            "avnlook_",  # Write out lookDev controls
                        ],
                    )

                # (NOTE) Auto re-try the export once. For unknown reason,
                # some artist may encounter runtime error when exporting but
                # re-run the publish without any change will resolve.
                #
                # BUGFIX: the previous loop was `while auto_retry:` and
                # decremented the counter on failure, so the first failure
                # made the loop condition false and the export silently
                # never retried NOR re-raised. Loop forever and let the
                # counter decide between retrying and re-raising.
                attempts_left = 1
                while True:
                    try:
                        _export_alembic()
                    except RuntimeError as err:
                        if attempts_left:
                            attempts_left -= 1
                            self.log.warning(err)
                            self.log.warning("Retrying...")
                        else:
                            raise err
                    else:
                        break
[ "davidlatwe@gmail.com" ]
davidlatwe@gmail.com
499c1470a1433ed4086dcaf206216e5fda9b4ec6
a839135eae95f745f1d9edb370ac459854042cce
/tests/test_feed.py
4b9c19439fe949c370936b6b86e25b4acde5b1bb
[ "MIT" ]
permissive
d21d3q/thermalprinter
8afae538fa81055bf47710390af1c213b86455fc
a502fe8a7b7ab5a0773e92a37e6539f73b34b950
refs/heads/master
2023-03-16T01:49:52.478726
2022-06-02T10:52:23
2022-06-02T10:52:23
164,908,229
0
0
MIT
2019-01-09T17:31:32
2019-01-09T17:31:25
Python
UTF-8
Python
false
false
771
py
# coding: utf-8
import pytest

from thermalprinter.exceptions import ThermalPrinterValueError


def test_changing_no_value(printer):
    """feed() without an argument advances by the default single line."""
    printer.feed()
    assert printer.feeds == 1


def test_changing_good_value(printer):
    """A valid line count is accepted and added to the running total."""
    printer.feed(42)
    assert printer.feeds == 43


def test_bad_value__not_int(printer):
    """A non-integer argument is rejected; the counter is unchanged."""
    with pytest.raises(ThermalPrinterValueError):
        printer.feed('42')
    assert printer.feeds == 43


def test_changing_bad_value__not_in_range_low(printer):
    """A negative count is rejected; the counter is unchanged."""
    with pytest.raises(ThermalPrinterValueError):
        printer.feed(-42)
    assert printer.feeds == 43


def test_changing_bad_value__not_in_range_high(printer):
    """A count above the allowed maximum is rejected; counter unchanged."""
    with pytest.raises(ThermalPrinterValueError):
        printer.feed(512)
    assert printer.feeds == 43
[ "contact@tiger-222.fr" ]
contact@tiger-222.fr
756caf90edd534e5f336c64ff1742c1aa610a6d9
945f9c5c34b42fd7863c525f7e54d2c88a5950e6
/pyppl_strict.py
c29178d57ce1b3515b5241aa48a033f133e73c5d
[ "MIT" ]
permissive
stjordanis/pyppl_strict
0c517a4e803e039b09602c385a75cbcd773514fc
efd4d361ddda3f95b1249cee612ef9f5d7b46123
refs/heads/master
2022-09-28T02:09:19.641392
2020-06-06T05:14:33
2020-06-06T05:14:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,010
py
"""More strict check of job success for PyPPL Features: 1. make sure all outputs have been generated 2. allow custom returncode settings 3. allow a custom script to check the output file """ from os import utime import random import cmdy from pyppl.plugin import hookimpl from pyppl.config import config from pyppl.jobmgr import STATES from pyppl.utils import always_list, fs, filesig from pyppl._proc import OUT_VARTYPE from pyppl._job import RC_NO_RCFILE __version__ = "0.0.7" RC_NO_OUTFILE = 5000 RC_EXPECT_FAIL = 10000 config.config.strict_rc = [0] config.config.strict_expect = "" def strict_rc_converter(rc): """Convert return code from input""" if not rc: return [0] if isinstance(rc, str): rc = always_list(rc) rc = list(rc) if 0 not in rc: rc.insert(0, 0) return rc def show_error(job, total): """Show error message for a job""" if job.rc >= RC_EXPECT_FAIL: msg = '%s (Expectation failed)' % (job.rc - RC_EXPECT_FAIL) elif job.rc >= RC_NO_OUTFILE: msg = '%s (Output file/dir not generated)' % (job.rc - RC_NO_OUTFILE) elif job.rc == RC_NO_RCFILE: msg = '- (No RC file generated)' else: msg = '%s (Script failed)' % job.rc if job.proc.errhow == 'ignore': job.logger( f'Failed but ignored (totally {total}). Return code: {msg}.', level='warning', plugin='strict' ) return job.logger(f'Failed (totally {total}). 
Return code: {msg}.', level='failed', plugin='strict') job.logger(f'Script: {job.dir / "job.script"}', level='failed', plugin='strict') job.logger(f'Stdout: {job.dir / "job.stdout"}', level='failed', plugin='strict') job.logger(f'Stderr: {job.dir / "job.stderr"}', level='failed', plugin='strict') # errors are not echoed, echo them out if (job.index not in job.proc.config.get('echo_jobs', []) or 'stderr' not in job.proc.config.get('echo_types', {})): job.logger('Check STDERR below:', level='failed', plugin='strict') errmsgs = [] if job.dir.joinpath('job.stderr').exists(): errmsgs = job.dir.joinpath('job.stderr').read_text().splitlines() if not errmsgs: errmsgs = ['<EMPTY STDERR>'] for errmsg in errmsgs[-20:] if len(errmsgs) > 20 else errmsgs: job.logger(errmsg, level='failed', plugin='strict') if len(errmsgs) > 20: job.logger( '[ Top {top} line(s) ignored, see all in stderr file. ]'. format(top=len(errmsgs) - 20), level='failed', plugin='strict' ) @hookimpl def logger_init(logger): """Add log levels""" logger.add_level('FAILED', 'ERROR') logger.add_sublevel('OUTFILE_NOT_EXISTS', -1) logger.add_sublevel('EXPECTATION_FAILED', -1) @hookimpl def proc_init(proc): """Add configs""" def strict_expect_converter(expect): if isinstance(expect, proc.template): return expect return proc.template(expect, **proc.envs) proc.add_config('strict_rc', default=0, converter=strict_rc_converter) proc.add_config('strict_expect', default='', converter=strict_expect_converter) @hookimpl def job_succeeded(job): """Check rc, expect and outfiles to tell if a job is really succeeded""" if job.rc not in job.proc.config.strict_rc: return False # check if all outputs are generated # refresh stat outdir, mtime = filesig(job.dir.joinpath('output'), job.proc.dirsig) utime(outdir, (mtime, mtime)) for outtype, outdata in job.output.values(): if outtype not in OUT_VARTYPE and not fs.exists(outdata): job.rc += RC_NO_OUTFILE job.logger('Outfile not generated: {}'.format(outdata), 
slevel="OUTFILE_NOT_EXISTS", level='debug', plugin='strict') return False expect_cmd = job.proc.config.strict_expect.render(job.data) if expect_cmd: cmd = cmdy.bash(c=expect_cmd, _raise=False) # pylint: disable=no-member if cmd.rc != 0: job.rc += RC_EXPECT_FAIL job.logger(expect_cmd, slevel="EXPECTATION_FAILED", level='error', plugin='strict') return False return True @hookimpl def proc_postrun(proc, status): """Show error message for failed jobs""" if status == 'failed': failed_jobs = [ job for job in proc.jobs if job.state in (STATES.ENDFAILED, STATES.DONEFAILED, STATES.SUBMITFAILED, STATES.BUILTFAILED, STATES.KILLED, STATES.KILLFAILED) ] failed_jobs = failed_jobs or [proc.jobs[0]] show_error(random.choice(failed_jobs), total=len(failed_jobs))
[ "pwwang@pwwang.com" ]
pwwang@pwwang.com
a5fb8ff0526b379fe6f367eb993bd6f0943b7aac
f8b4461f66801fa624ec1798c4547b6f5c9bdf51
/SpaceInvaders/train.py
e950d4b70255c9c88363bf6d9d3eb624142b5460
[]
no_license
jcw024/NEAT-retro
5c713aea81efecc108c88e7dde434586c66e35a1
fdf9a6044e934fb658ad86cad2730690b7af8975
refs/heads/master
2021-10-24T13:57:41.696033
2021-10-23T02:42:49
2021-10-23T02:42:49
165,330,293
2
1
null
null
null
null
UTF-8
Python
false
false
3,121
py
#! /usr/bin/env python
"""Train a NEAT network to play Space Invaders (SNES) via gym-retro.

The retro environment `env` and the NEAT `config` are module globals set
up in the __main__ block below; eval_genomes reads them directly.
"""
from __future__ import division
import retro
import numpy as np
import cv2
import neat
import pickle
import cProfile
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', '-c', type=str,
                    help='checkpoint file to continue previous run')
args = parser.parse_args()


# trains single genome within generation
def eval_genomes(genome, config):
    """Play one episode with `genome`'s network; return/record its fitness.

    Fitness is the accumulated in-game reward; the episode ends on
    game-over or after 400 consecutive steps without improvement.
    """
    ob = env.reset()
    ac = env.action_space.sample()
    # Downscale observations 5x in each dimension to shrink the input layer
    inx, iny, inc = env.observation_space.shape
    inx = int(inx/5)
    iny = int(iny/5)
    net = neat.nn.recurrent.RecurrentNetwork.create(genome, config)
    current_max_fitness = 0
    fitness_current = 0
    counter = 0  # steps since fitness last improved
    lives_tot = 3
    #cv2.namedWindow("network_input", cv2.WINDOW_NORMAL)  #show input pixels to neat
    done = False
    while not done:
        env.render()
        #shrink screen for fewer pixel observations per loop
        #scaledimg = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
        #scaledimg = cv2.resize(scaledimg, (iny, inx))
        ob = cv2.resize(ob, (inx, iny))
        ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
        ob = np.reshape(ob, (inx, iny))
        #cv2.imshow('network_input', scaledimg)
        #cv2.waitKey(1)
        # Flatten the grayscale frame into the network's input vector
        imgarray = np.ndarray.flatten(ob)
        nnOutput = net.activate(imgarray)
        ob, rew1, done1, info = env.step(nnOutput)  #3 steps to skip some frames
        ob, rew2, done2, info = env.step(nnOutput)
        ob, rew3, done3, info = env.step(nnOutput)
        rew = (rew1 + rew2 + rew3)
        lives = info['lives']
        if lives < lives_tot:
            #fitness_current -= 100
            lives_tot = lives
        fitness_current += rew
        if fitness_current > current_max_fitness:
            current_max_fitness = fitness_current
            counter = 0
        else:
            counter += 1
        # Stop on game-over or 400 stagnant steps
        if any([done1, done2, done3]) or counter == 400:
            done = True
            genome.fitness = fitness_current
            print(genome.fitness)
    return genome.fitness


# setup training population parameters
def main(checkpoint=None):
    """Run NEAT evolution, optionally resuming from a checkpoint file."""
    if checkpoint is not None:
        p = neat.checkpoint.Checkpointer.restore_checkpoint(checkpoint)
    else:
        p = neat.Population(config)
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    p.add_reporter(neat.Checkpointer(generation_interval=10,
                                     time_interval_seconds=3600))
    pe = neat.ParallelEvaluator(5, eval_genomes)  #run on multiple cores
    winner = p.run(pe.evaluate, 40)  #do training for 40 generations
    # Persist the best genome for later playback
    with open('winner_pop50_gen40.pkl', 'wb') as output:
        print('writing winner gen to ', output)
        pickle.dump(winner, output)


if __name__ == '__main__':
    env = retro.make('SpaceInvaders-Snes',
                     '1Player.ClassicMode.UprightCabinet.state')
    imgarray = []
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         'config')
    main(args.checkpoint)
[ "=" ]
=
0e0a01939fd310d7aafb5cccf0c79513c9cf03ab
a7bc6a7c45c58ac08e295b77d4a19769bfd97463
/NMT/Transformers_NMT/process_data.py
98081e75fabdfaea0b0488852ea04fb70cffe3ca
[]
no_license
pnageshkar/NLP_pytorch_project
cc2a3fb8a41e7d2d8a794561e1b9a971b36f2cfa
57d2ac65a0cf21dca43a9329f6c25e6a23854e15
refs/heads/master
2022-11-14T14:59:03.099067
2020-07-09T09:05:10
2020-07-09T09:05:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,056
py
""" @file : process_data.py @author: xiaolu @time : 2019-12-26 """ ''' 主要功能: 1. 对中文文本和英文文本建立词典 然后存到vocab.pkl 2. 将训练集和验证集转化为id序列 然后存到data.pkl ''' import os import pickle from collections import Counter import jieba import nltk from tqdm import tqdm from config import Config from utils import normalizeString, encode_text def build_vocab(token, word2idx, idx2char): if token not in word2idx: next_index = len(word2idx) word2idx[token] = next_index idx2char[next_index] = token def process(file, lang='zh'): ''' 建立词表 :param file: :param lang: :return: ''' print('processing {}...'.format(file)) with open(file, 'r', encoding='utf-8') as f: data = f.readlines() word_freq = Counter() lengths = [] for line in tqdm(data): sentence = line.strip() if lang == 'en': # 若是英文 转小写 然后切分 sentence_en = sentence.lower() tokens = [normalizeString(s) for s in nltk.word_tokenize(sentence_en)] # 得到token然后再清洗 word_freq.update(list(tokens)) vocab_size = Config.n_src_vocab # 是由超参数给出的 else: # 若是中文 使用jieba进行分词 seg_list = jieba.cut(sentence.strip()) tokens = list(seg_list) word_freq.update(list(tokens)) vocab_size = Config.n_tgt_vocab lengths.append(len(tokens)) # 得到每个句子的真实长度 words = word_freq.most_common(vocab_size - 4) # vocab_size 统计出词频最高的这么多个词 word_map = {k[0]: v + 4 for v, k in enumerate(words)} # 词->id word_map['<pad>'] = 0 word_map['<sos>'] = 1 word_map['<eos>'] = 2 word_map['<unk>'] = 3 print(len(word_map)) print(words[:100]) word2idx = word_map idx2char = {v: k for k, v in word2idx.items()} return word2idx, idx2char def get_data(in_file, out_file): ''' 加载语料 :param in_file: 中文数据集路径 :param out_file: 英文数据集路径 :return: ''' print('getting data {}->{}...'.format(in_file, out_file)) with open(in_file, 'r', encoding='utf-8') as file: in_lines = file.readlines() with open(out_file, 'r', encoding='utf-8') as file: out_lines = file.readlines() samples = [] for i in tqdm(range(len(in_lines))): sentence_zh = in_lines[i].strip() tokens = jieba.cut(sentence_zh.strip()) in_data = encode_text(src_char2idx, 
tokens) # encode_text(src_char2idx, tokens) 将语料转为id序列 sentence_en = out_lines[i].strip().lower() tokens = [normalizeString(s.strip()) for s in nltk.word_tokenize(sentence_en)] # 将英文单词预处理 out_data = [Config.sos_id] + encode_text(tgt_char2idx, tokens) + [Config.eos_id] # 转为id 并加上开始和结束标志 # 这里的maxlen_in=50 和 maxlen_out=100 也是有超参数给出的 if len(in_data) < Config.maxlen_in and len(out_data) < Config.maxlen_out and Config.unk_id not in in_data and Config.unk_id not in out_data: samples.append({'in': in_data, 'out': out_data}) return samples if __name__ == '__main__': # 加载词表 没有的话 我们建立词表 if os.path.isfile(Config.vocab_file): with open(Config.vocab_file, 'rb') as file: data = pickle.load(file) src_char2idx = data['dict']['src_char2idx'] src_idx2char = data['dict']['src_idx2char'] tgt_char2idx = data['dict']['tgt_char2idx'] tgt_idx2char = data['dict']['tgt_idx2char'] else: src_char2idx, src_idx2char = process(Config.train_translation_zh_filename, lang='zh') tgt_char2idx, tgt_idx2char = process(Config.train_translation_en_filename, lang='en') print("输入文本字典的大小:", len(src_char2idx)) print("输出文本字典的大小:", len(tgt_char2idx)) data = { 'dict': { 'src_char2idx': src_char2idx, 'src_idx2char': src_idx2char, 'tgt_char2idx': tgt_char2idx, 'tgt_idx2char': tgt_idx2char } } with open(Config.vocab_file, 'wb') as file: pickle.dump(data, file) # 加载训练集和验证集 train = get_data(Config.train_translation_zh_filename, Config.train_translation_en_filename) valid = get_data(Config.valid_translation_zh_filename, Config.valid_translation_en_filename) data = { 'train': train, 'valid': valid } # 这里面存的数据: 中文已映射成对应得id保存, 英文也已映射成id 并且加了其实标志和结束标志.他们都没有进行padding 只是有一个最大长度 print('num_train: ' + str(len(train))) print('num_valid: ' + str(len(valid))) with open(Config.data_file, 'wb') as file: pickle.dump(data, file)
[ "lu.xiao@tcl.com" ]
lu.xiao@tcl.com
ea1091477d699b137e6ab824d5bb4743d7cd9fe0
18f672d3239d199770756ebb8000f6544b5093fb
/stock/migrations/0002_product_name_short.py
ea5dd79e743fe0fc8e7ce026a967bf1d7ae275ab
[ "Apache-2.0" ]
permissive
pkimber/old-stock-migrated-to-gitlab
a8cc4adca8c90fe9fff134ff5fd31b37e914d3db
e712dd19684cdc2028bfea42c373c19ab3aab152
refs/heads/master
2021-06-15T10:58:03.884840
2017-04-13T15:23:00
2017-04-13T15:23:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
454
py
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-09-30 12:46 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('stock', '0001_initial'), ] operations = [ migrations.AddField( model_name='product', name='name_short', field=models.CharField(blank=True, max_length=100), ), ]
[ "code@pkimber.net" ]
code@pkimber.net
d8e6164c4728fb4b27177e08f4714d8ae4094824
10123c03954bfd57e6b9ee0acbe93e61c11dc6d0
/Permutations.py
01368d7730607adce93caeb2b458812778a12f72
[]
no_license
ramchinta/python
4a720d27fd137d32d83326338ad1748c17cd5998
e82305a822ea200086a0978a29c18ab65a3b18fb
refs/heads/master
2020-09-02T06:49:30.365372
2020-05-09T08:22:57
2020-05-09T08:22:57
219,160,098
0
0
null
null
null
null
UTF-8
Python
false
false
1,022
py
'''Given a collection of distinct integers, return all possible permutations.

Example:
Input: [1,2,3]
Output:
[
  [1,2,3],
  [1,3,2],
  [2,1,3],
  [2,3,1],
  [3,1,2],
  [3,2,1]
]'''


class Solution:
    def permute(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        Generate every permutation of `nums` with the classic in-place
        swap-based backtracking scheme.
        """
        count = len(nums)
        results = []

        def _explore(pos=0):
            # A full prefix means `nums` currently holds one permutation.
            if pos == count:
                results.append(nums[:])
            for swap_at in range(pos, count):
                # Choose nums[swap_at] as the element at `pos`...
                nums[pos], nums[swap_at] = nums[swap_at], nums[pos]
                _explore(pos + 1)
                # ...then undo the choice (backtrack).
                nums[pos], nums[swap_at] = nums[swap_at], nums[pos]

        _explore()
        return results


print(Solution().permute([1,2,3]))
#[[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
[ "mithrachinta@gmail.com" ]
mithrachinta@gmail.com
9ea9c27397cba8d779b2f23ebe41b720d863300d
632efa9e1991bc632aaba4783e05c942afc77712
/tests/py/test_goal.py
e180004421eec2fb754be4f7d4fe2dadeaa2aba3
[ "CC0-1.0", "LicenseRef-scancode-public-domain" ]
permissive
astiob/liberapay.com
82e2785b17a104197c483bf5dd1c1cc85cc1cb26
29842f7aeaaaca99f4427ff43ebdefd41223ff9c
refs/heads/master
2020-04-05T23:39:24.843313
2016-04-19T13:02:07
2016-04-19T13:02:07
56,925,959
0
1
null
2016-04-23T15:22:00
2016-04-23T15:22:00
null
UTF-8
Python
false
false
2,309
py
from __future__ import print_function, unicode_literals

from decimal import Decimal

from liberapay.testing import Harness


class Tests(Harness):
    """Tests for the /username/goal endpoint (funding-goal settings)."""

    def setUp(self):
        # One participant shared by every test
        self.alice = self.make_participant('alice')

    def change_goal(self, goal, goal_custom="", auth_as="alice"):
        """POST a goal change for alice; `auth_as` may be 'alice' or None
        (anonymous)."""
        return self.client.PxST(
            "/alice/goal",
            {'goal': goal, 'goal_custom': goal_custom},
            auth_as=self.alice if auth_as == 'alice' else auth_as
        )

    def test_changing_to_minus_1_asks_confirmation(self):
        # goal == -1 means "do not accept donations"; the page warns first
        r = self.client.PxST('/alice/goal', {'goal': '-1'}, auth_as=self.alice)
        assert "Warning: Doing this will remove all the tips you are currently receiving." in r.text

    def test_wonky_custom_amounts_are_standardized(self):
        # Stray commas / excess decimals are normalized server-side
        self.change_goal("custom", ",100,100.00001")
        alice = self.alice.from_id(self.alice.id)
        assert alice.goal == 100100

    def test_anonymous_gets_403(self):
        response = self.change_goal("100.00", auth_as=None)
        assert response.code == 403, response.code

    def test_invalid_is_400(self):
        response = self.change_goal("cheese")
        assert response.code == 400, response.code

    def test_invalid_custom_amount_is_400(self):
        response = self.change_goal("custom", "cheese")
        assert response.code == 400, response.code

    def test_change_goal(self):
        # A sequence of changes: only the last value should persist...
        self.change_goal("custom", "100")
        self.change_goal("0")
        self.change_goal("custom", "1,100.00")
        self.change_goal("null", "")
        self.change_goal("custom", "400")

        actual = self.db.one("SELECT goal FROM participants")
        assert actual == Decimal("400.00")

        # ...but every change is recorded as a 'set_goal' event
        actual = self.db.all("""
            SELECT payload
            FROM events
            WHERE type = 'set_goal'
            ORDER BY ts DESC
        """)
        assert actual == ['400', None, '1100.00', '0', '100']

    def test_team_member_can_change_team_goal(self):
        team = self.make_participant('team', kind='group')
        team.add_member(self.alice)
        r = self.client.PxST(
            '/team/goal', {'goal': 'custom', 'goal_custom': '99.99'},
            auth_as=self.alice
        )
        assert r.code == 302
        assert team.refetch().goal == Decimal('99.99')
[ "changaco@changaco.oy.lc" ]
changaco@changaco.oy.lc
086dd62330fcf5b767d3e54b7e8ca44c0db75ec7
ec1aa6a0217c29301b08c8b9559be1f8a346502b
/correctiv_eurosfueraerzte/admin/__init__.py
e4eabaa7c8211dc838ab3b52a39e8e2ede241a5f
[ "MIT" ]
permissive
correctiv/correctiv-eurosfueraerzte
091039881130fa6c7a78811cdc9bf00893aa6906
291c358d65eccf06034e409d888de56a4545c7b7
refs/heads/master
2022-12-16T03:24:41.366597
2017-08-16T13:37:33
2017-08-16T13:37:33
53,336,927
5
0
MIT
2022-11-22T01:53:29
2016-03-07T15:39:45
Python
UTF-8
Python
false
false
788
py
from django.contrib import admin from ..models import (PharmaCompany, Drug, ObservationalStudy, PaymentRecipient, PharmaPayment, ZeroDoctor, ZeroDocSubmission) from .base import (PharmaCompanyAdmin, DrugAdmin, ObservationalStudyAdmin, PaymentRecipientAdmin, PharmaPaymentAdmin) from .zerodocs import ZeroDoctorAdmin, ZeroDocSubmissionAdmin admin.site.register(PharmaCompany, PharmaCompanyAdmin) admin.site.register(Drug, DrugAdmin) admin.site.register(ObservationalStudy, ObservationalStudyAdmin) admin.site.register(PaymentRecipient, PaymentRecipientAdmin) admin.site.register(PharmaPayment, PharmaPaymentAdmin) admin.site.register(ZeroDoctor, ZeroDoctorAdmin) admin.site.register(ZeroDocSubmission, ZeroDocSubmissionAdmin)
[ "mail@stefanwehrmeyer.com" ]
mail@stefanwehrmeyer.com
f7f852b199e0f51ab15fa49c3bcdb5463ef18a76
f0d713996eb095bcdc701f3fab0a8110b8541cbb
/NbZ2cMeEfH3KpQRku_24.py
d4f52eab2e0c8c08f9daf43571dcc4231657dce1
[]
no_license
daniel-reich/turbo-robot
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
a7a25c63097674c0a81675eed7e6b763785f1c41
refs/heads/main
2023-03-26T01:55:14.210264
2021-03-23T16:08:01
2021-03-23T16:08:01
350,773,815
0
0
null
null
null
null
UTF-8
Python
false
false
1,541
py
""" You are given a list of `0`s and `1`s, like the one below: [0, 1, 0, 0, 0, 1, 1, 1, 0, 1] # The first element, a 0, and the last element, a 1 are both unhappy. # The second element, a 1 is unhappy. # The second-to-last element, a 0 is unhappy. # All other numbers in this list are happy. A `1` is **unhappy** if the digit to its left and the digit to its right are both 0s. A `0` is **unhappy** if the digit to its left and the digit to its right are both 1s. If a number has only one neighbor, it is **unhappy** if its only neighbor is different. Otherwise, a number is **happy**. Write a function that takes in a list of `0`s and `1`s and outputs the **portion of numbers which are happy**. The total portion of numbers which are happy can be represented as: portion of happy 0s = # happy 0s / total # 0s portion of happy 1s = # happy 1s / total # 1s portion of happy numbers = (portion of happy 0s + portion of happy 1s) / 2 In the example above, `0.6` is the number of happy numbers. ### Examples portion_happy([0, 1, 0, 1, 0]) ➞ 0 portion_happy([0, 1, 1, 0]) ➞ 0.5 portion_happy([0, 0, 0, 1, 1]) ➞ 1 portion_happy([1, 0, 0, 1, 1]) ➞ 0.8 ### Notes * Remember: a `0` border number is unhappy if its only neighbor is a `1` and vice versa. * A list will contain at least two elements. """ import re def portion_happy(n): n = ''.join(map(str,n)) u = lambda x: len(re.findall("(?<!%s)%s(?!%s)"%(x,x,x),n)) return 1-((u(0)+u(1))/len(n))
[ "daniel.reich@danielreichs-MacBook-Pro.local" ]
daniel.reich@danielreichs-MacBook-Pro.local
fccf0b7bd873beb81a2e03f845b1ab61e0cd8002
a2dc75a80398dee58c49fa00759ac99cfefeea36
/bluebottle/funding/migrations/0023_bankpayoutaccount.py
4c2896a6cfd4e6e3a3b9b656a801977f9e27dd97
[ "BSD-2-Clause" ]
permissive
onepercentclub/bluebottle
e38b0df2218772adf9febb8c6e25a2937889acc0
2b5f3562584137c8c9f5392265db1ab8ee8acf75
refs/heads/master
2023-08-29T14:01:50.565314
2023-08-24T11:18:58
2023-08-24T11:18:58
13,149,527
15
9
BSD-3-Clause
2023-09-13T10:46:20
2013-09-27T12:09:13
Python
UTF-8
Python
false
false
1,430
py
# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2019-08-26 10:30 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('funding', '0022_auto_20190804_1022'), ] operations = [ migrations.CreateModel( name='BankPayoutAccount', fields=[ ('payoutaccount_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='funding.PayoutAccount')), ('account_number', models.CharField(blank=True, max_length=100, null=True, verbose_name='bank account number')), ('account_holder_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='account holder name')), ('account_holder_address', models.CharField(blank=True, max_length=500, null=True, verbose_name='account holder address')), ('account_bank_country', models.CharField(blank=True, max_length=100, null=True, verbose_name='bank country')), ('account_details', models.CharField(blank=True, max_length=500, null=True, verbose_name='account details')), ], options={ 'abstract': False, }, bases=('funding.payoutaccount',), ), ]
[ "loek@goodup.com" ]
loek@goodup.com
c4fe86c6132760476a28ff976caa14c7b657506f
691793de7d07b17918d076b319281c706f7275c0
/test/test_notification_event.py
547cb7800d3e17e7eddff3c860fb253bd77ae619
[ "MIT" ]
permissive
signingtoday/signingtoday-sdk-python
1ddfae5340690c80760c500436631d4a8ff9c87f
ed267279622fb59f2ad8fa289157fc9cdf9d8a5b
refs/heads/master
2020-12-03T15:32:35.755222
2020-03-24T08:27:11
2020-03-24T08:27:11
231,372,803
0
0
null
null
null
null
UTF-8
Python
false
false
2,254
py
# coding: utf-8 """ Signing Today Web *Signing Today* is the perfect Digital Signature Gateway. Whenever in Your workflow You need to add one or more Digital Signatures to Your document, *Signing Today* is the right choice. You prepare Your documents, *Signing Today* takes care of all the rest: send invitations (`signature tickets`) to signers, collects their signatures, send You back the signed document. Integrating *Signing Today* in Your existing applications is very easy. Just follow these API specifications and get inspired by the many examples presented hereafter. # noqa: E501 The version of the OpenAPI document: 2.0.0 Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import unittest import datetime import signing_today_client from signing_today_client.models.notification_event import NotificationEvent # noqa: E501 from signing_today_client.rest import ApiException class TestNotificationEvent(unittest.TestCase): """NotificationEvent unit test stubs""" def setUp(self): pass def tearDown(self): pass def make_instance(self, include_optional): """Test NotificationEvent include_option is a boolean, when False only required params are included, when True both required and optional params are included """ # model = signing_today_client.models.notification_event.NotificationEvent() # noqa: E501 if include_optional : return NotificationEvent( id = 1, time = '2007-04-02T19:30:10Z', dst_id = 'd6bb4c8f-37bf-4900-a1e4-dd9b0939cafb', user_id = '1ca229c8-2a99-4f3e-9421-36377fd8d9e5', dst_title = 'Contract', username = 'johndoe', email = 'jdo@bit4id.com', event = 'evt_dst_status_signed' ) else : return NotificationEvent( ) def testNotificationEvent(self): """Test NotificationEvent""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) if __name__ == '__main__': unittest.main()
[ "smartcloud@bit4id.com" ]
smartcloud@bit4id.com
6c5d71530c58702155e5b75c3fe0457827dc44a5
b8e885e1546dfb7a45dc7da7718d73ae4103196e
/nebula/__init__.py
7fd8272fa8e9757aa84f2b70df4bef4fdf4bb229
[]
no_license
dannon/nebula
b4655b3b5401c528d25ae973763c4dd82d367ab6
5ca94404894f9a64b8adf2afbc37381757ae6b7a
refs/heads/master
2021-01-18T16:00:41.768529
2014-09-23T20:29:55
2014-09-23T20:29:55
null
0
0
null
null
null
null
UTF-8
Python
false
false
911
py
# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from parser import NebulaCompile from scheduler import Scheduler from website import WebSite from workrepo import WorkRepo class Config: def __init__(self, mesos=None, port=9999, host='localhost', workdir="/tmp", docker=None): self.mesos = mesos self.port = port self.host = host self.workdir = workdir self.docker = docker
[ "kellrott@gmail.com" ]
kellrott@gmail.com
0d5998dc0bdef8074798becd45da509feac2c687
2f0aa66e14c6595289f6a0de2bdf71e9922052a7
/nextApi/company/serializers.py
a8e40069f87047ad03eef4ae796f22982d9f0f9f
[]
no_license
aimethierry/NextApi
8f83a2b0f499fdf5118eb930baa051584cfd9aa5
90884ee6d900ce71116b40276dda0e97bec0b521
refs/heads/master
2022-12-11T09:03:54.981284
2020-09-19T12:40:36
2020-09-19T12:40:36
296,866,571
0
0
null
null
null
null
UTF-8
Python
false
false
246
py
from rest_framework import serializers from .models import Company class companySerializer(serializers.ModelSerializer): class Meta: model = Company fields = ['name', 'country', 'city', 'street', 'pbox', 'description']
[ "aime.thierry97@gmail.com" ]
aime.thierry97@gmail.com
89859a5f2de10fec32b6d37f696243b99ef2ff8e
0c785a2601f2b02c1636d57c70039f0c4f08294a
/pybles/PySrc/tests/test_jyserver.py
81f3ce332de8e93c0ed3f6cf8c9c62c15242b1c1
[]
no_license
SoftwareEngineeringToolDemos/ICSE-2012-CodeBubbles
bc26d9655fbd56e5f61364db1c176a3539653d7f
6da209c1ff0f7fbfa958c97dc22ec478b2b5219c
refs/heads/master
2021-01-17T13:35:48.729810
2016-06-24T19:42:07
2016-06-24T19:42:07
45,094,073
0
2
null
null
null
null
UTF-8
Python
false
false
5,869
py
''' @author Fabio Zadrozny ''' import sys import unittest import socket import urllib IS_JYTHON = sys.platform.find('java') != -1 if IS_JYTHON: import os #make it as if we were executing from the directory above this one (so that we can use jycompletionserver #without the need for it being in the pythonpath) sys.argv[0] = os.path.dirname(sys.argv[0]) #twice the dirname to get the previous level from this file. sys.path.insert(1, os.path.join(os.path.dirname(sys.argv[0]))) import pycompletionserver as jycompletionserver DEBUG = 0 def dbg(s): if DEBUG: sys.stdout.write('TEST %s\n' % s) class Test(unittest.TestCase): def setUp(self): unittest.TestCase.setUp(self) def tearDown(self): unittest.TestCase.tearDown(self) def testIt(self): dbg('ok') def testMessage(self): t = jycompletionserver.T(0, 0) l = [] l.append(('Def', 'description' , 'args')) l.append(('Def1', 'description1', 'args1')) l.append(('Def2', 'description2', 'args2')) msg = t.processor.formatCompletionMessage('test_jyserver.py', l) self.assertEquals('@@COMPLETIONS(test_jyserver.py,(Def,description,args),(Def1,description1,args1),(Def2,description2,args2))END@@', msg) l = [] l.append(('Def', 'desc,,r,,i()ption', '')) l.append(('Def(1', 'descriptio(n1', '')) l.append(('De,f)2', 'de,s,c,ription2', '')) msg = t.processor.formatCompletionMessage(None, l) expected = '@@COMPLETIONS(None,(Def,desc%2C%2Cr%2C%2Ci%28%29ption, ),(Def%281,descriptio%28n1, ),(De%2Cf%292,de%2Cs%2Cc%2Cription2, ))END@@' self.assertEquals(expected, msg) def testCompletionSocketsAndMessages(self): dbg('testCompletionSocketsAndMessages') t, sToWrite, sToRead, self.connToRead, addr = self.createConnections() dbg('connections created') try: #now that we have the connections all set up, check the code completion messages. 
msg = urllib.quote_plus('math') toWrite = '@@IMPORTS:%sEND@@' % msg dbg('writing' + str(toWrite)) sToWrite.send(toWrite) #math completions completions = self.readMsg() dbg(urllib.unquote_plus(completions)) start = '@@COMPLETIONS(' self.assert_(completions.startswith(start), '%s DOESNT START WITH %s' % (completions, start)) self.assert_(completions.find('@@COMPLETIONS') != -1) self.assert_(completions.find('END@@') != -1) msg = urllib.quote_plus('__builtin__.str') toWrite = '@@IMPORTS:%sEND@@' % msg dbg('writing' + str(toWrite)) sToWrite.send(toWrite) #math completions completions = self.readMsg() dbg(urllib.unquote_plus(completions)) start = '@@COMPLETIONS(' self.assert_(completions.startswith(start), '%s DOESNT START WITH %s' % (completions, start)) self.assert_(completions.find('@@COMPLETIONS') != -1) self.assert_(completions.find('END@@') != -1) finally: try: self.sendKillMsg(sToWrite) while not hasattr(t, 'ended'): pass #wait until it receives the message and quits. sToRead.close() sToWrite.close() self.connToRead.close() except: pass def createConnections(self, p1=50002, p2=50003): ''' Creates the connections needed for testing. ''' t = jycompletionserver.T(p1, p2) t.start() sToWrite = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sToWrite.connect((jycompletionserver.HOST, p1)) sToRead = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sToRead.bind((jycompletionserver.HOST, p2)) sToRead.listen(1) #socket to receive messages. 
connToRead, addr = sToRead.accept() return t, sToWrite, sToRead, connToRead, addr def readMsg(self): msg = '@@PROCESSING_END@@' while msg.startswith('@@PROCESSING'): msg = self.connToRead.recv(1024) if msg.startswith('@@PROCESSING:'): dbg('Status msg:' + str(msg)) while msg.find('END@@') == -1: msg += self.connToRead.recv(1024) return msg def sendKillMsg(self, socket): socket.send(jycompletionserver.MSG_KILL_SERVER) #"C:\Program Files\Java\jdk1.5.0_04\bin\java.exe" -Dpython.path="C:\bin\jython21\Lib";"C:\bin\jython21";"C:\Program Files\Java\jdk1.5.0_04\jre\lib\rt.jar" -classpath C:/bin/jython21/jython.jar org.python.util.jython D:\eclipse_workspace\org.python.pydev\PySrc\pycompletionserver.py 53795 58659 # #"C:\Program Files\Java\jdk1.5.0_04\bin\java.exe" -Dpython.path="C:\bin\jython21\Lib";"C:\bin\jython21";"C:\Program Files\Java\jdk1.5.0_04\jre\lib\rt.jar" -classpath C:/bin/jython21/jython.jar org.python.util.jython D:\eclipse_workspace\org.python.pydev\PySrc\tests\test_jyserver.py # #"C:\Program Files\Java\jdk1.5.0_04\bin\java.exe" -Dpython.path="C:\bin\jython21\Lib";"C:\bin\jython21";"C:\Program Files\Java\jdk1.5.0_04\jre\lib\rt.jar" -classpath C:/bin/jython21/jython.jar org.python.util.jython d:\runtime-workbench-workspace\jython_test\src\test.py if __name__ == '__main__': if IS_JYTHON: suite = unittest.makeSuite(Test) unittest.TextTestRunner(verbosity=1).run(suite) else: sys.stdout.write('Not running jython tests for non-java platform: %s' % sys.platform)
[ "you@example.com" ]
you@example.com
e6f76838cc8948c487e9b5d7f982f891fd930d1a
7c74ceb9f8addcc0816d012e0b84b174b96e0def
/src/azure-cli/azure/cli/command_modules/aro/_rbac.py
3b993a002cd0239d028eae1e6aec6d2af2013b6b
[ "MIT", "LGPL-2.1-only", "LGPL-2.1-or-later", "GPL-1.0-or-later", "MPL-2.0", "LGPL-2.0-or-later", "BSD-3-Clause", "Apache-2.0", "BSD-2-Clause" ]
permissive
microsoft/azure-cli
4c826290e7a6f6bd27da3829b05e4f02ff6dc8d9
9ba64b33f6f78e2c3e42f8a147f59484300e8779
refs/heads/dev
2023-08-31T08:51:39.526556
2022-11-28T19:08:23
2022-11-28T19:08:23
370,900,439
7
7
MIT
2023-08-01T23:34:50
2021-05-26T03:59:41
Python
UTF-8
Python
false
false
2,862
py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import uuid from azure.cli.core.commands.client_factory import get_mgmt_service_client from azure.cli.core.commands.client_factory import get_subscription_id from azure.cli.core.profiles import get_sdk from azure.cli.core.profiles import ResourceType from knack.log import get_logger from msrest.exceptions import ValidationError from msrestazure.tools import resource_id NETWORK_CONTRIBUTOR = '4d97b98b-1d4f-4787-a291-c67834d212e7' logger = get_logger(__name__) def _gen_uuid(): return uuid.uuid4() def _create_role_assignment(auth_client, resource, params): # retry "ValidationError: A hash conflict was encountered for the role Assignment ID. Please use a new Guid." 
max_retries = 3 retries = 0 while True: try: return auth_client.role_assignments.create(resource, _gen_uuid(), params) except ValidationError as ex: if retries >= max_retries: raise retries += 1 logger.warning("%s; retry %d of %d", ex, retries, max_retries) def assign_network_contributor_to_resource(cli_ctx, resource, object_id): auth_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION) RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION, 'RoleAssignmentCreateParameters', mod='models', operation_group='role_assignments') role_definition_id = resource_id( subscription=get_subscription_id(cli_ctx), namespace='Microsoft.Authorization', type='roleDefinitions', name=NETWORK_CONTRIBUTOR, ) _create_role_assignment(auth_client, resource, RoleAssignmentCreateParameters( role_definition_id=role_definition_id, principal_id=object_id, principal_type='ServicePrincipal', )) def has_network_contributor_on_resource(cli_ctx, resource, object_id): auth_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION) role_definition_id = resource_id( subscription=get_subscription_id(cli_ctx), namespace='Microsoft.Authorization', type='roleDefinitions', name=NETWORK_CONTRIBUTOR, ) for assignment in auth_client.role_assignments.list_for_scope(resource): if assignment.role_definition_id.lower() == role_definition_id.lower() and \ assignment.principal_id.lower() == object_id.lower(): return True return False
[ "noreply@github.com" ]
microsoft.noreply@github.com
52ac0359f5fb4a6ae85782c49c80f98062649017
34a70bf642b6f678dce2b22efc598656a1a7d90a
/GraphCodes/CyclesUndirectedGraph.py
5a36ee8efbc7fa46ab572f4fc8f242ead3212579
[]
no_license
CodeForContribute/Algos-DataStructures
ce89a313b3e32de57f23b263966a84bb020e6a18
d0ddc7c8f94270f9269a9a5233b3a07fe59c9a1f
refs/heads/master
2022-03-28T03:08:17.934077
2019-11-23T21:58:03
2019-11-23T21:58:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,240
py
from collections import defaultdict class Graph: def __init__(self, vertices): self.vertices = vertices self.graph = defaultdict(list) def addEdge(self, u, v): self.graph[u].append(v) # me Complexity of this method is same as time complexity of DFS traversal which is O(V+E) def isCycleIndirected(self, v, visited, recStack): visited[v] = True recStack[v] = True for neighbor in self.graph[v]: if visited[neighbor] and recStack[neighbor]: return True if self.isCycleIndirected(neighbor, visited, recStack): return True def isCycle(self): visited = [False for i in range(self.vertices)] recStack = [False for i in range(self.vertices)] for i in range(self.vertices): if not visited[i]: if self.isCycleIndirected(i, visited, recStack): return True return False if __name__ == '__main__': g = Graph(8) g.addEdge(0, 1) g.addEdge(0, 2) g.addEdge(1, 2) g.addEdge(2, 5) g.addEdge(2, 3) g.addEdge(3, 7) if g.isCycle() == 1: print("Graph has a cycle") else: print("Graph has no cycle")
[ "RAUSHAN.KUMAR2@otis.COM" ]
RAUSHAN.KUMAR2@otis.COM
b107848bc925c50961146c951860c3bfc5fbe2c3
397e125e94f4f139f2bf5055824d81f24b8b1757
/ABC/137/E.py
7c09e79bfacdc5cdd6e89aa3d749feeb7e189c9a
[]
no_license
tails1434/Atcoder
ecbab6ee238e3f225551297db961b1b502841fa4
e7c7fed36be46bbaaf020a70997842240ba98d62
refs/heads/master
2021-07-07T00:31:49.235625
2020-09-30T01:42:01
2020-09-30T01:42:01
189,009,622
0
0
null
null
null
null
UTF-8
Python
false
false
1,580
py
from collections import deque import sys input = sys.stdin.readline def main(): N, M, P = map(int, input().split()) edge = [] g = [[] for _ in range(N)] rg = [[] for _ in range(N)] for _ in range(M): A, B, C = map(int, input().split()) A -= 1 B -= 1 C -= P C = -C edge.append((A,B,C)) g[A].append(B) rg[B].append(A) # 0 => N-1の間にある閉路を検出したいので # 0とN-1からたどりつけない場所は前処理で取り除く visited1 = set() visited2 = set() visited1.add(0) visited2.add(N-1) Q = deque() Q.append(0) while Q: v = Q.popleft() for dest in g[v]: if dest in visited1: continue visited1.add(dest) Q.append(dest) Q.append(N-1) while Q: v = Q.popleft() for dest in rg[v]: if dest in visited2: continue visited2.add(dest) Q.append(dest) OK = visited1 & visited2 flag = True d = [float('inf')] * N d[0] = 0 step = 0 while flag: flag = False for A, B, C in edge: if not A in OK: continue if not B in OK: continue newD = d[A] + C if newD < d[B]: d[B] = newD flag = True step += 1 if step > N: print(-1) exit() print(max(0,-d[N-1])) if __name__ == "__main__": main()
[ "sososo1333@gmail.com" ]
sososo1333@gmail.com
a09fbeb5fff9be004c863cc1d188a6c2e4edecd2
ce196aba0adde47ea2767eae1d7983a1ef548bb8
/T30-turtle-带轴sin(x).py
65b1abeeb749519d38055915ef35d8a1c0f3e5ca
[]
no_license
xiang-daode/Python3_codes
5d2639ffd5d65065b98d029e79b8f3608a37cf0b
06c64f85ce2c299aef7f9311e9473e0203a05b09
refs/heads/main
2023-08-30T14:59:55.123128
2021-11-03T05:12:24
2021-11-03T05:12:24
333,632,892
0
2
null
null
null
null
UTF-8
Python
false
false
756
py
import math from turtle import * N = 100 def f(x): return x def jumpto(x, y): penup() goto(x,y) def line(x1, y1, x2, y2): jumpto(x1, y1) pendown() goto(x2, y2) def coosys(): width(4) pencolor('red') line(-N, 0, N+1, 0) pencolor('blue') line(0, -2, 0, 2.1) def plot(fun, y, color): pencolor(color) width(2) jumpto(-N, 0) pendown() #dot(5) for i in range(-N,N,2): yi=math.cos(i/10) goto(i,yi) #dot(5) def main(): reset() setworldcoordinates(-100,-2, 101, 2.1) speed(0) hideturtle() coosys() plot(f, 0, "green") return "Done!" if __name__ == "__main__": main() mainloop()
[ "noreply@github.com" ]
xiang-daode.noreply@github.com
a24ea528d1d4837e64a95d27a224929645603346
90c2619937019bb1145edfb2d9d6a7cdea460b57
/src/783.py
4a993223e7ea44eab55e13ecca14d8d860f897c5
[ "MIT" ]
permissive
zhaoyi3264/leetcode-solutions
2d289a7e5c74cfe7f8b019c6056ce16485ae057b
1a3a2d441cdd07a17e80b0ea43b7b266844f530c
refs/heads/main
2023-06-03T11:35:25.054669
2021-06-28T02:58:07
2021-06-28T02:58:07
349,618,427
0
0
null
null
null
null
UTF-8
Python
false
false
588
py
# Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution: prev = -float('inf') ans = float('inf') def minDiffInBST(self, root: TreeNode) -> int: def inorder(root): if not root: return inorder(root.left) self.ans = min(self.ans, root.val - self.prev) self.prev = root.val inorder(root.right) inorder(root) return self.ans
[ "zhaoyi3264@gmail.com" ]
zhaoyi3264@gmail.com
51f1741be15eb364a9879aef8b51ed191d5ebdfa
de577e64440d2c330ff0018e8bfb7cf3abf11b70
/fsleyes/plugins/tools/addroihistogram.py
e388e0aa2128585e355eae1a475c08d34bb2f652
[ "Apache-2.0", "CC-BY-3.0", "BSD-3-Clause" ]
permissive
CGSchwarzMayo/fsleyes
bb887bf8e8dd46bb9e0e3d5c3028d97811fabad1
37b45d034d60660b6de3e4bdf5dd6349ed6d853b
refs/heads/master
2023-09-01T01:39:38.508051
2023-08-21T18:21:34
2023-08-21T18:21:34
272,476,938
0
0
NOASSERTION
2020-06-15T15:34:36
2020-06-15T15:34:34
null
UTF-8
Python
false
false
5,615
py
#!/usr/bin/env python # # addroihistogram.py - The AddROIHistogramAction class. # # Author: Paul McCarthy <pauldmccarthy@gmail.com> # """This module provides the :class:`AddROIHistogramAction` class, an action used by the :class:`.HistogramPanel`. """ import wx import numpy as np import fsl.data.image as fslimage import fsleyes.views.histogrampanel as histogrampanel import fsleyes.strings as strings import fsleyes.plotting.dataseries as dataseries import fsleyes.plotting.histogramseries as histogramseries import fsleyes.plugins.tools.addmaskdataseries as addmaskdataseries import fsleyes.actions.base as base class AddROIHistogramAction(base.Action): """The ``AddROIHistogramAction`` class is used by the :class:`.HistogramPanel`. It performs a very similar task to the :class:`.AddMaskDataSeriesAction` - the user selects a binary mask, the data within the base image is extracted for that mask, and the histogram of that data is added to the plot. """ @staticmethod def supportedViews(): """The ``AddROIHistogramAction`` is restricted for use with the :class:`.HistogramPanel`. """ return [histogrampanel.HistogramPanel] def __init__(self, overlayList, displayCtx, plotPanel): """Create an ``AddROIHistogramAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg plotPanel: The :class:`.HistogramPanel`. """ base.Action.__init__( self, overlayList, displayCtx, self.__addROIHistogram) self.__plotPanel = plotPanel self.__roiOptions = [] overlayList.addListener('overlays', self.name, self.__overlayListChanged) displayCtx .addListener('selectedOverlay', self.name, self.__overlayListChanged) self.__overlayListChanged() def destroy(self): """Must be called when this ``AddROIHistogramAction`` is no longer in use. 
""" if self.destroyed: return self.overlayList.removeListener('overlays', self.name) self.displayCtx .removeListener('selectedOverlay', self.name) self.__plotPanel = None self.__roiOptions = None base.Action.destroy(self) def __overlayListChanged(self, *a): """Called when the :class:`.OverlayList` or the :attr:`.DisplayContext.selectedOverlay` changes. Updates a list of valid mask images for the currently selected overlay. """ overlay = self.displayCtx.getSelectedOverlay() if (len(self.overlayList) == 0 or (not isinstance(overlay, fslimage.Image))): self.enabled = False return self.__roiOptions = [o for o in self.overlayList if isinstance(o, fslimage.Image) and o is not overlay and o.sameSpace(overlay)] self.enabled = len(self.__roiOptions) > 0 def __addROIHistogram(self): """Prompts the user to select an ROI mask, calculates the histogram of that mask on the currently selected overlay, and adds the result to the ``HistogramPanel``. """ overlay = self.displayCtx.getSelectedOverlay() opts = self.displayCtx.getOpts(overlay) roiOptions = self.__roiOptions frame = wx.GetApp().GetTopWindow() msg = strings.messages[self, 'selectMask'].format(overlay.name) title = strings.titles[ self, 'selectMask'].format(overlay.name) dlg = addmaskdataseries.MaskDialog( frame, [o.name for o in roiOptions], title=title, message=msg, checkbox=False) if dlg.ShowModal() != wx.ID_OK: return maskimg = roiOptions[dlg.GetChoice()] mask = maskimg[:] > 0 if overlay.ndim > 3: data = overlay[opts.index()][mask] else: data = overlay[mask] count = self.__plotPanel.histType == 'count' drange = (np.nanmin(data), np.nanmax(data)) nbins = histogramseries.autoBin(data, drange) xdata, ydata, _ = histogramseries.histogram(data, nbins, drange, drange, includeOutliers=False, count=count) ds = dataseries.DataSeries(overlay, self.overlayList, self.displayCtx, self.__plotPanel) ds.colour = self.__plotPanel.getOverlayPlotColour(overlay) ds.lineStyle = self.__plotPanel.getOverlayPlotStyle(overlay) ds.lineWidth = 2 
ds.alpha = 1 ds.label = '{} [mask: {}]'.format(overlay.name, maskimg.name) # We have to run the data through # prepareDataSeries to preprocess # (e.g. smooth) it ds.setData(xdata, ydata) ds.setData(*self.__plotPanel.prepareDataSeries(ds)) self.__plotPanel.canvas.dataSeries.append(ds)
[ "pauldmccarthy@gmail.com" ]
pauldmccarthy@gmail.com
ff1b4b20c92851e1bdcfd180c5b9f4b46b22dbdb
0ea15da8de03fa9ad7acce50c03824ced868a4e7
/awesome_itech_project/awesome_itech_project/wsgi.py
e4fd12ce9b44444d43b7c3da64df723fa319c0de
[]
no_license
Zhouhao12345/AwesomeITECH
6b23d2c40c2c74be237eba2396b83188b7750e13
57f1f9bc304f407edbcb9f64ff289037d9aff7f2
refs/heads/master
2021-01-10T14:43:04.994235
2016-03-25T21:15:09
2016-03-25T21:15:09
52,460,370
1
1
null
null
null
null
UTF-8
Python
false
false
417
py
""" WSGI config for awesome_itech_project project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awesome_itech_project.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
[ "root@localhost.localdomain" ]
root@localhost.localdomain
b7d41e8d6776f9362fcd567214b789af65441908
1719920a92f7194766624474b98d59ef8d6eddaf
/models/mobile_app_content_file.py
51e891805d2376f4dff51e5a5f0d891a13e2d963
[ "MIT" ]
permissive
MIchaelMainer/msgraph-v10-models-python
cfa5e3a65ba675383975a99779763211ed9fa0a9
adad66363ebe151be2332f3ef74a664584385748
refs/heads/master
2020-03-19T12:51:06.370673
2018-06-08T00:16:12
2018-06-08T00:16:12
136,544,573
1
0
null
null
null
null
UTF-8
Python
false
false
4,626
py
# -*- coding: utf-8 -*- ''' # Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information. # # This file was generated and any changes will be overwritten. ''' from __future__ import unicode_literals from ..model.mobile_app_content_file_upload_state import MobileAppContentFileUploadState from datetime import datetime from ..one_drive_object_base import OneDriveObjectBase class MobileAppContentFile(OneDriveObjectBase): def __init__(self, prop_dict={}): self._prop_dict = prop_dict @property def azure_storage_uri(self): """ Gets and sets the azureStorageUri Returns: str: The azureStorageUri """ if "azureStorageUri" in self._prop_dict: return self._prop_dict["azureStorageUri"] else: return None @azure_storage_uri.setter def azure_storage_uri(self, val): self._prop_dict["azureStorageUri"] = val @property def is_committed(self): """ Gets and sets the isCommitted Returns: bool: The isCommitted """ if "isCommitted" in self._prop_dict: return self._prop_dict["isCommitted"] else: return None @is_committed.setter def is_committed(self, val): self._prop_dict["isCommitted"] = val @property def created_date_time(self): """ Gets and sets the createdDateTime Returns: datetime: The createdDateTime """ if "createdDateTime" in self._prop_dict: return datetime.strptime(self._prop_dict["createdDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f") else: return None @created_date_time.setter def created_date_time(self, val): self._prop_dict["createdDateTime"] = val.isoformat()+"Z" @property def name(self): """ Gets and sets the name Returns: str: The name """ if "name" in self._prop_dict: return self._prop_dict["name"] else: return None @name.setter def name(self, val): self._prop_dict["name"] = val @property def size(self): """ Gets and sets the size Returns: int: The size """ if "size" in self._prop_dict: return self._prop_dict["size"] else: return None @size.setter def size(self, val): 
self._prop_dict["size"] = val @property def size_encrypted(self): """ Gets and sets the sizeEncrypted Returns: int: The sizeEncrypted """ if "sizeEncrypted" in self._prop_dict: return self._prop_dict["sizeEncrypted"] else: return None @size_encrypted.setter def size_encrypted(self, val): self._prop_dict["sizeEncrypted"] = val @property def azure_storage_uri_expiration_date_time(self): """ Gets and sets the azureStorageUriExpirationDateTime Returns: datetime: The azureStorageUriExpirationDateTime """ if "azureStorageUriExpirationDateTime" in self._prop_dict: return datetime.strptime(self._prop_dict["azureStorageUriExpirationDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f") else: return None @azure_storage_uri_expiration_date_time.setter def azure_storage_uri_expiration_date_time(self, val): self._prop_dict["azureStorageUriExpirationDateTime"] = val.isoformat()+"Z" @property def upload_state(self): """ Gets and sets the uploadState Returns: :class:`MobileAppContentFileUploadState<onedrivesdk.model.mobile_app_content_file_upload_state.MobileAppContentFileUploadState>`: The uploadState """ if "uploadState" in self._prop_dict: if isinstance(self._prop_dict["uploadState"], OneDriveObjectBase): return self._prop_dict["uploadState"] else : self._prop_dict["uploadState"] = MobileAppContentFileUploadState(self._prop_dict["uploadState"]) return self._prop_dict["uploadState"] return None @upload_state.setter def upload_state(self, val): self._prop_dict["uploadState"] = val
[ "mmainer@microsoft.com" ]
mmainer@microsoft.com
ff268df9cf570e57179bf90ed24e13ec67a171d6
98ac0b139301285ece1a4bc9f13b75433d263419
/torchreid/models/mlfn.py
ec712d66f781745c4f0c329560cfd572515eda3e
[ "MIT" ]
permissive
sovrasov/deep-person-reid
eb0e6b2f0bb3fa6dc22205fd443fd583f9951cd2
79773b88986c26e9ac2407af5999923426298a8f
refs/heads/master
2021-09-13T04:31:15.620950
2019-11-14T08:06:34
2019-11-14T08:06:34
202,522,569
1
1
MIT
2019-11-14T08:06:36
2019-08-15T10:35:17
Python
UTF-8
Python
false
false
7,569
py
from __future__ import absolute_import from __future__ import division __all__ = ['mlfn'] import torch from torch import nn from torch.nn import functional as F import torch.utils.model_zoo as model_zoo model_urls = { # training epoch = 5, top1 = 51.6 'imagenet': 'https://mega.nz/#!YHxAhaxC!yu9E6zWl0x5zscSouTdbZu8gdFFytDdl-RAdD2DEfpk', } class MLFNBlock(nn.Module): def __init__(self, in_channels, out_channels, stride, fsm_channels, groups=32): super(MLFNBlock, self).__init__() self.groups = groups mid_channels = out_channels // 2 # Factor Modules self.fm_conv1 = nn.Conv2d(in_channels, mid_channels, 1, bias=False) self.fm_bn1 = nn.BatchNorm2d(mid_channels) self.fm_conv2 = nn.Conv2d(mid_channels, mid_channels, 3, stride=stride, padding=1, bias=False, groups=self.groups) self.fm_bn2 = nn.BatchNorm2d(mid_channels) self.fm_conv3 = nn.Conv2d(mid_channels, out_channels, 1, bias=False) self.fm_bn3 = nn.BatchNorm2d(out_channels) # Factor Selection Module self.fsm = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, fsm_channels[0], 1), nn.BatchNorm2d(fsm_channels[0]), nn.ReLU(inplace=True), nn.Conv2d(fsm_channels[0], fsm_channels[1], 1), nn.BatchNorm2d(fsm_channels[1]), nn.ReLU(inplace=True), nn.Conv2d(fsm_channels[1], self.groups, 1), nn.BatchNorm2d(self.groups), nn.Sigmoid(), ) self.downsample = None if in_channels != out_channels or stride > 1: self.downsample = nn.Sequential( nn.Conv2d(in_channels, out_channels, 1, stride=stride, bias=False), nn.BatchNorm2d(out_channels), ) def forward(self, x): residual = x s = self.fsm(x) # reduce dimension x = self.fm_conv1(x) x = self.fm_bn1(x) x = F.relu(x, inplace=True) # group convolution x = self.fm_conv2(x) x = self.fm_bn2(x) x = F.relu(x, inplace=True) # factor selection b, c = x.size(0), x.size(1) n = c // self.groups ss = s.repeat(1, n, 1, 1) # from (b, g, 1, 1) to (b, g*n=c, 1, 1) ss = ss.view(b, n, self.groups, 1, 1) ss = ss.permute(0, 2, 1, 3, 4).contiguous() ss = ss.view(b, c, 1, 1) x = ss * x # recover 
dimension x = self.fm_conv3(x) x = self.fm_bn3(x) x = F.relu(x, inplace=True) if self.downsample is not None: residual = self.downsample(residual) return F.relu(residual + x, inplace=True), s class MLFN(nn.Module): """Multi-Level Factorisation Net. Reference: Chang et al. Multi-Level Factorisation Net for Person Re-Identification. CVPR 2018. Public keys: - ``mlfn``: MLFN (Multi-Level Factorisation Net). """ def __init__(self, num_classes, loss='softmax', groups=32, channels=[64, 256, 512, 1024, 2048], embed_dim=1024, **kwargs): super(MLFN, self).__init__() self.loss = loss self.groups = groups # first convolutional layer self.conv1 = nn.Conv2d(3, channels[0], 7, stride=2, padding=3) self.bn1 = nn.BatchNorm2d(channels[0]) self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) # main body self.feature = nn.ModuleList([ # layer 1-3 MLFNBlock(channels[0], channels[1], 1, [128, 64], self.groups), MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups), MLFNBlock(channels[1], channels[1], 1, [128, 64], self.groups), # layer 4-7 MLFNBlock(channels[1], channels[2], 2, [256, 128], self.groups), MLFNBlock(channels[2], channels[2], 1, [256, 128], self.groups), MLFNBlock(channels[2], channels[2], 1, [256, 128], self.groups), MLFNBlock(channels[2], channels[2], 1, [256, 128], self.groups), # layer 8-13 MLFNBlock(channels[2], channels[3], 2, [512, 128], self.groups), MLFNBlock(channels[3], channels[3], 1, [512, 128], self.groups), MLFNBlock(channels[3], channels[3], 1, [512, 128], self.groups), MLFNBlock(channels[3], channels[3], 1, [512, 128], self.groups), MLFNBlock(channels[3], channels[3], 1, [512, 128], self.groups), MLFNBlock(channels[3], channels[3], 1, [512, 128], self.groups), # layer 14-16 MLFNBlock(channels[3], channels[4], 2, [512, 128], self.groups), MLFNBlock(channels[4], channels[4], 1, [512, 128], self.groups), MLFNBlock(channels[4], channels[4], 1, [512, 128], self.groups), ]) self.global_avgpool = nn.AdaptiveAvgPool2d(1) # projection functions self.fc_x = 
nn.Sequential( nn.Conv2d(channels[4], embed_dim, 1, bias=False), nn.BatchNorm2d(embed_dim), nn.ReLU(inplace=True), ) self.fc_s = nn.Sequential( nn.Conv2d(self.groups * 16, embed_dim, 1, bias=False), nn.BatchNorm2d(embed_dim), nn.ReLU(inplace=True), ) self.classifier = nn.Linear(embed_dim, num_classes) self.init_params() def init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = F.relu(x, inplace=True) x = self.maxpool(x) s_hat = [] for block in self.feature: x, s = block(x) s_hat.append(s) s_hat = torch.cat(s_hat, 1) x = self.global_avgpool(x) x = self.fc_x(x) s_hat = self.fc_s(s_hat) v = (x + s_hat) * 0.5 v = v.view(v.size(0), -1) if not self.training: return v y = self.classifier(v) if self.loss == 'softmax': return y elif self.loss == 'triplet': return y, v else: raise KeyError('Unsupported loss: {}'.format(self.loss)) def init_pretrained_weights(model, model_url): """Initializes model with pretrained weights. Layers that don't match with pretrained layers in name or size are kept unchanged. 
""" pretrain_dict = model_zoo.load_url(model_url) model_dict = model.state_dict() pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()} model_dict.update(pretrain_dict) model.load_state_dict(model_dict) def mlfn(num_classes, loss='softmax', pretrained=True, **kwargs): model = MLFN(num_classes, loss, **kwargs) if pretrained: #init_pretrained_weights(model, model_urls['imagenet']) import warnings warnings.warn('The imagenet pretrained weights need to be manually downloaded from {}'.format(model_urls['imagenet'])) return model
[ "k.zhou@qmul.ac.uk" ]
k.zhou@qmul.ac.uk
ca0e2147614ffa23c1d12256ceb2c465f8ef9ee1
8cefaf15f2b70bc3457047351151f85dbffc191e
/tools.py
2a0c12b25219e29de93b58540ba2a51db4d1e70a
[]
no_license
tjacek/ortho_selection
183fa86088d94f343b191538493035ad31c4d1d7
ee923bfda63262ce62033f8e633f1ac37804ce21
refs/heads/master
2023-01-21T09:18:07.671478
2020-12-02T21:53:22
2020-12-02T21:53:22
265,209,732
0
0
null
null
null
null
UTF-8
Python
false
false
2,182
py
import itertools import feats def filtered_dict(names,dic): return { name_i:dic[name_i] for name_i in names} def split(names,selector=None): if(type(names)==dict): train,test=split(names.keys(),selector) return filtered_dict(train,names),filtered_dict(test,names) if(not selector): selector=get_person train,test=[],[] for name_i in names: if(selector(name_i)): train.append(name_i) else: test.append(name_i) return train,test def get_person(name_i): return (int(name_i.split('_')[1])%2)==1 def person_cats(y): return ["%s_%d" %(y_i.split("_")[1],i) for i,y_i in enumerate(y)] def read_datasets(in_path): if(type(in_path)==tuple): common_path,deep_path=in_path if(type(common_path)==list): return multi_dataset(common_path,deep_path) return combined_dataset(common_path,deep_path) return feats.read(in_path) def combined_dataset(common_path,deep_path,sub_datasets=False): if(not common_path): return feats.read(deep_path) if(not deep_path): return feats.read(common_path) common_data=feats.read(common_path)[0] deep_data=feats.read(deep_path) datasets=[common_data+ data_i for data_i in deep_data] if(sub_datasets): return datasets,common_data,deep_data return datasets def multi_dataset(common_path,deep_path): datasets=[combined_dataset(common_i,deep_path) for common_i in common_path] return itertools.chain.from_iterable(datasets) def concat_dataset(in_path): if(type(in_path)==tuple): common_path,deep_path=in_path # raise Exception(type(common_path)) if(type(common_path)==list): common_data=feats.read_unified(common_path) else: common_data=feats.read(common_path) # return multi_dataset(common_path,deep_path) # return combined_dataset(common_path,deep_path) deep_data=feats.read(deep_path) datasets=[common_data+ data_i for data_i in deep_data] return datasets return feats.read(in_path)
[ "tjacek@agh.edu.pl" ]
tjacek@agh.edu.pl
892a16eacf42bc8b91b6898224179f9044e23c40
4dc5944ffad7f251b467200ba5101000561c2bdf
/iniciante/2879.py
0dab20c1683c9fc7cb5621f11332df7ce287b7bd
[]
no_license
DyogoBendo/URI-Python
3215337229fdb5ef1a446231925c72da4d1ea64b
40376e0fbb2e7dd97ba316a20863826b4753c601
refs/heads/master
2023-06-03T08:36:06.745086
2021-06-16T02:00:11
2021-06-16T02:00:11
314,667,743
0
0
null
null
null
null
UTF-8
Python
false
false
163
py
if __name__ == "__main__": n = int(input()) w = 0 for i in range(n): p = int(input()) if p != 1: w += 1 print(w)
[ "dyogoromagnabendo@gmail.com" ]
dyogoromagnabendo@gmail.com
89049661b471aab41331bcd78d322b1768555aa0
44e14881c8e248c347dd81f0574c4f306c684d64
/mysite/settings.py
c800c4c0e67641a4ccd20b759ee29dff8311daf4
[]
no_license
eduarde/OrderHelperApp
f38af40fc492c6300c3ac70ba2e740789b1d7261
83938a7f50519f45dcbb96d6dbf1ea49559e28fa
refs/heads/master
2021-01-10T12:30:47.142918
2016-03-17T20:05:38
2016-03-17T20:05:38
52,556,674
0
0
null
null
null
null
UTF-8
Python
false
false
3,709
py
""" Django settings for mysite project. Generated by 'django-admin startproject' using Django 1.9. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os import django.contrib.auth django.contrib.auth.LOGIN_URL = '/' # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'yan%@fw$+%fg*coibl7gnyog30wj$l5-uumhzl%8tt8r!lfxwd' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = ['orderhelper.pythonanywhere.com'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'orderhelper', 'widget_tweaks', 'datetimewidget', 'pure_pagination', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mysite.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 
'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'mysite.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Europe/Bucharest' USE_I18N = True USE_L10N = True USE_TZ = True MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage' # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') LOGIN_URL ='/' LOGIN_REDIRECT_URL = '/pending' PAGINATION_SETTINGS = { 'PAGE_RANGE_DISPLAYED': 5, 'MARGIN_PAGES_DISPLAYED': 2, 'SHOW_FIRST_PAGE_WHEN_INVALID': True, } # handler404 = 'orderhelper.views.handler404'
[ "eduard.erja@gmail.com" ]
eduard.erja@gmail.com
2ea70bfaa953a1c8cc5b12b1a97ff742bf1dc11c
96dcea595e7c16cec07b3f649afd65f3660a0bad
/tests/components/picnic/test_services.py
bc80ff73a11f0e084c64a9bd9984de5caa683527
[ "Apache-2.0" ]
permissive
home-assistant/core
3455eac2e9d925c92d30178643b1aaccf3a6484f
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
refs/heads/dev
2023-08-31T15:41:06.299469
2023-08-31T14:50:53
2023-08-31T14:50:53
12,888,993
35,501
20,617
Apache-2.0
2023-09-14T21:50:15
2013-09-17T07:29:48
Python
UTF-8
Python
false
false
6,777
py
"""Tests for the Picnic services.""" from unittest.mock import MagicMock, patch import pytest from homeassistant.components.picnic import CONF_COUNTRY_CODE, DOMAIN from homeassistant.components.picnic.const import SERVICE_ADD_PRODUCT_TO_CART from homeassistant.components.picnic.services import PicnicServiceException from homeassistant.const import CONF_ACCESS_TOKEN from homeassistant.core import HomeAssistant from tests.common import MockConfigEntry UNIQUE_ID = "295-6y3-1nf4" def create_picnic_api_client(unique_id): """Create PicnicAPI mock with set response data.""" auth_token = "af3wh738j3fa28l9fa23lhiufahu7l" auth_data = { "user_id": unique_id, "address": { "street": "Teststreet", "house_number": 123, "house_number_ext": "b", }, } picnic_mock = MagicMock() picnic_mock.session.auth_token = auth_token picnic_mock.get_user.return_value = auth_data return picnic_mock async def create_picnic_config_entry(hass: HomeAssistant, unique_id): """Create a Picnic config entry.""" config_entry = MockConfigEntry( domain=DOMAIN, data={ CONF_ACCESS_TOKEN: "x-original-picnic-auth-token", CONF_COUNTRY_CODE: "NL", }, unique_id=unique_id, ) config_entry.add_to_hass(hass) await hass.config_entries.async_setup(config_entry.entry_id) await hass.async_block_till_done() return config_entry @pytest.fixture def picnic_api_client(): """Return the default picnic api client.""" with patch( "homeassistant.components.picnic.create_picnic_client" ) as create_picnic_client_mock: picnic_client_mock = create_picnic_api_client(UNIQUE_ID) create_picnic_client_mock.return_value = picnic_client_mock yield picnic_client_mock @pytest.fixture async def picnic_config_entry(hass: HomeAssistant): """Generate the default Picnic config entry.""" return await create_picnic_config_entry(hass, UNIQUE_ID) async def test_add_product_using_id( hass: HomeAssistant, picnic_api_client: MagicMock, picnic_config_entry: MockConfigEntry, ) -> None: """Test adding a product by id.""" await hass.services.async_call( DOMAIN, 
SERVICE_ADD_PRODUCT_TO_CART, { "config_entry_id": picnic_config_entry.entry_id, "product_id": "5109348572", "amount": 3, }, blocking=True, ) # Check that the right method is called on the api picnic_api_client.add_product.assert_called_with("5109348572", 3) async def test_add_product_using_name( hass: HomeAssistant, picnic_api_client: MagicMock, picnic_config_entry: MockConfigEntry, ) -> None: """Test adding a product by name.""" # Set the return value of the search api endpoint picnic_api_client.search.return_value = [ { "items": [ { "id": "2525404", "name": "Best tea", "display_price": 321, "unit_quantity": "big bags", }, { "id": "2525500", "name": "Cheap tea", "display_price": 100, "unit_quantity": "small bags", }, ] } ] await hass.services.async_call( DOMAIN, SERVICE_ADD_PRODUCT_TO_CART, {"config_entry_id": picnic_config_entry.entry_id, "product_name": "Tea"}, blocking=True, ) # Check that the right method is called on the api picnic_api_client.add_product.assert_called_with("2525404", 1) async def test_add_product_using_name_no_results( hass: HomeAssistant, picnic_api_client: MagicMock, picnic_config_entry: MockConfigEntry, ) -> None: """Test adding a product by name that can't be found.""" # Set the search return value and check that the right exception is raised during the service call picnic_api_client.search.return_value = [] with pytest.raises(PicnicServiceException): await hass.services.async_call( DOMAIN, SERVICE_ADD_PRODUCT_TO_CART, { "config_entry_id": picnic_config_entry.entry_id, "product_name": "Random non existing product", }, blocking=True, ) async def test_add_product_using_name_no_named_results( hass: HomeAssistant, picnic_api_client: MagicMock, picnic_config_entry: MockConfigEntry, ) -> None: """Test adding a product by name for which no named results are returned.""" # Set the search return value and check that the right exception is raised during the service call picnic_api_client.search.return_value = [{"items": [{"attr": "test"}]}] with 
pytest.raises(PicnicServiceException): await hass.services.async_call( DOMAIN, SERVICE_ADD_PRODUCT_TO_CART, { "config_entry_id": picnic_config_entry.entry_id, "product_name": "Random product", }, blocking=True, ) async def test_add_product_multiple_config_entries( hass: HomeAssistant, picnic_api_client: MagicMock, picnic_config_entry: MockConfigEntry, ) -> None: """Test adding a product for a specific Picnic service while multiple are configured.""" with patch( "homeassistant.components.picnic.create_picnic_client" ) as create_picnic_client_mock: picnic_api_client_2 = create_picnic_api_client("3fj9-9gju-236") create_picnic_client_mock.return_value = picnic_api_client_2 picnic_config_entry_2 = await create_picnic_config_entry(hass, "3fj9-9gju-236") await hass.services.async_call( DOMAIN, SERVICE_ADD_PRODUCT_TO_CART, {"product_id": "5109348572", "config_entry_id": picnic_config_entry_2.entry_id}, blocking=True, ) # Check that the right method is called on the api picnic_api_client.add_product.assert_not_called() picnic_api_client_2.add_product.assert_called_with("5109348572", 1) async def test_add_product_device_doesnt_exist( hass: HomeAssistant, picnic_api_client: MagicMock, picnic_config_entry: MockConfigEntry, ) -> None: """Test adding a product for a specific Picnic service, which doesn't exist.""" with pytest.raises(ValueError): await hass.services.async_call( DOMAIN, SERVICE_ADD_PRODUCT_TO_CART, {"product_id": "5109348572", "config_entry_id": 12345}, blocking=True, ) # Check that the right method is called on the api picnic_api_client.add_product.assert_not_called()
[ "noreply@github.com" ]
home-assistant.noreply@github.com
dfe1ced20ccf2b601d73682bc0dc97808d4d7108
4e82bbef275a42ea7c9d58cab546de938fd82064
/pywad/browser/firefox.py
3f6db16f9a6599f59f12f7e7f4568f41eea8103a
[]
no_license
TakesxiSximada/pywad
9a1f115f92f362ebaf5ee4767dd6b468ba9a1f51
8ad88595a8f00f4232c24167b5517db8e7c4993e
refs/heads/master
2016-09-02T04:39:17.736770
2015-03-16T09:37:45
2015-03-16T09:37:45
27,847,444
2
0
null
2015-03-16T09:34:31
2014-12-11T00:57:55
Python
UTF-8
Python
false
false
2,066
py
# -*- coding: utf-8 -*- from selenium.webdriver import Firefox, Proxy, FirefoxProfile class FirefoxFactory(object): """The borwser factory class of Firefox. """ default_profile = { 'security.warn_entering_secure': False, 'security.warn_entering_secure.show_once': True, 'security.warn_entering_weak': False, 'security.warn_entering_weak._show_once': True, 'security.warn_leaving_secure': False, 'security.warn_leaving_secure.show_once': True, 'security.warn_leaving_weak': False, 'security.warn_leaving_weak._show_once': True, 'security.warn_submit_insecure': False, 'security.warn_viewing_mixed': False, 'security.warn_viewing_mixed.show_once': True, } def __init__(self, proxy=None, implicitly_wait=10, clear_cookies=False): """Constructor. """ self.implicitly_wait = implicitly_wait self.clear_cookies = clear_cookies self.proxy = proxy def _create_proxy_setting(self): """Create proxy object. """ proxy = Proxy() if self.proxy: proxy.ftp_proxy = proxy.ssl_proxy = proxy.http_proxy = self.proxy return proxy def _create_profile(self): """Create profile object. """ profile = FirefoxProfile() for name, value in self.default_profile.items(): profile.set_preference(name, value) return profile def _create_browser_instance(self): """Start browser. """ profile = self._create_profile() proxy = self._create_proxy_setting() return Firefox(firefox_profile=profile, proxy=proxy) def create(self): """The browser factory method. """ browser = self._create_browser_instance() browser.implicitly_wait(self.implicitly_wait) if self.clear_cookies: browser.delete_allcookies() return browser def __call__(self): """Emurate factory function. """ return self.create()
[ "takesxi.sximada@gmail.com" ]
takesxi.sximada@gmail.com
618d07f2b7895b24d0458ce034dfafb50ecff6dd
39597cb5c9a04470381b630a070217506e054d3b
/deadfish.py
018611f4b61da3bb3a2a40c0f60595dc75f65877
[]
no_license
tahentx/gridshift
bc3aef88ac7736f62d187c486d079543ce0b8d68
9cc8fcc06f0784a321faee103f8ccfe3b3aa13d2
refs/heads/master
2021-06-28T23:05:20.059776
2020-05-17T16:46:46
2020-05-17T16:46:46
237,519,217
0
0
null
2021-06-10T15:47:36
2020-01-31T21:28:49
Jupyter Notebook
UTF-8
Python
false
false
488
py
def parse(data): data = list(data) box = [] value = 5 for char in data: if char == "i": value = value + 1 box.append(value) elif char == "d": value = value - 1 box.append(value) elif char == "s": value = value ** 2 box.append(value) elif char == "o": if len(box) == 0: box.append(0) return box return box parse("ii22ds")
[ "hendricks.ta@gmail.com" ]
hendricks.ta@gmail.com
f576cc3486e53efcdc3ec4c131c5bba9f36f9abd
bd08d0532f20b7285b437c9bf620de1bbcd5b9ea
/aalh_iit_jdoylewitgencollection/debug-convert-dates.py
bd1f624cc039e31df7b49158095233069740cbae
[ "Unlicense" ]
permissive
johndewees/iitmigration
a9e8a31ba6ceb541ce12c22fd612596cc243dbca
4dadfbecda719d6e7d60af076a231aedec3c862f
refs/heads/main
2023-03-14T17:06:58.777683
2021-03-27T20:44:58
2021-03-27T20:44:58
320,086,321
0
0
null
null
null
null
UTF-8
Python
false
false
1,787
py
from openpyxl import load_workbook filename = 'aalh_iit_jdoylewitgencollection.xlsx' wb = load_workbook(filename) ws = wb['Metadata Template'] minimumcol = 31 maximumcol = 31 minimumrow = 7 maximumrow = 200 iterationrow = 7 targetcol = 31 for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol): for cell in row: print(iterationrow) testvar = ws.cell(row=iterationrow, column=targetcol).value print(testvar) if testvar == None: print('No Date Digitized') elif testvar.find('/') != -1: testvarlist = testvar.split('/') testvaryear = testvarlist[2] testvaryear = testvaryear.strip() testvarmonth = testvarlist[0] testvarmonth = testvarmonth.strip() testvarmonth = int(testvarmonth) if testvarmonth < 10: testvarmonth = str(testvarmonth) testvarmonth = '0' + testvarmonth else: testvarmonth = str(testvarmonth) testvarday = testvarlist[1] testvarday = testvarday.strip() testvarday = int(testvarday) if testvarday < 10: testvarday = str(testvarday) testvarday = '0' + testvarday else: testvarday = str(testvarday) isodate = testvaryear + '-' + testvarmonth + '-' + testvarday ws.cell(row=iterationrow, column=targetcol).value = isodate else: print('Date is already formatted correctly') print(ws.cell(row=iterationrow, column=targetcol).value) iterationrow = iterationrow + 1 wb.save('aalh_iit_jdoylewitgencollection.xlsx')
[ "noreply@github.com" ]
johndewees.noreply@github.com
d08c52d6ec07887f972f0b6586973801cc248350
6cbaade56c5db347d1be9a3422a69af52df39b97
/python_workspace/3_bigdata/02_Standardization_Analysis/2_Excel/12_excel_introspect_all_workbooks.py
c11c104903944b5e2737168189535a1e3837ade5
[]
no_license
baewonje/iot_bigdata_-
b54e3772f64b9695efee8632183590628b679e11
2ce1af67d2f05abeb2ecd442b7299f349bdb9753
refs/heads/master
2020-09-06T09:53:53.018320
2019-12-06T08:19:33
2019-12-06T08:19:33
220,390,928
0
0
null
null
null
null
UTF-8
Python
false
false
682
py
# !/usr/bin/env python 3 import glob import sys import os from xlrd import open_workbook input_directory = sys.argv[1] # Parameters = . workbook_counter = 0 for input_file in glob.glob(os.path.join(input_directory, '*.xls*')): workbook = open_workbook(input_file) print('Workbook: {}'.format(os.path.basename(input_file))) print('Number of worksheets: {}'.format(workbook.nsheets)) for worksheet in workbook.sheets(): print('Worksheet name:', worksheet.name, '\tRows', worksheet.nrows, '\tColumns:', worksheet.ncols) workbook_counter += 1 print('Number of Excel workbooks: {}'.format(workbook_counter))
[ "50129576+baewonje@users.noreply.github.com" ]
50129576+baewonje@users.noreply.github.com
e08c2dc8691e3a89398eb76dc78a075bebb81438
7e616a3b1928940467ec09a82b52d5b4d83984a1
/MODULE1/Activities/PREFECT/create_dw.py
1dede11f56bea15c470fed5c431d3cb08e61a576
[]
no_license
gustavo32/DataEngineeringBootcamp
70e4c2fb06a387418718df2929b89820a0a76c0d
704dbe11f33f27ab9eda5649990685f153048429
refs/heads/main
2023-01-28T17:38:36.182545
2020-12-07T20:12:37
2020-12-07T20:12:37
316,643,488
0
0
null
null
null
null
UTF-8
Python
false
false
3,135
py
from datetime import datetime, timedelta import pendulum import prefect from prefect import task, Flow from prefect.schedules import CronSchedule import pandas as pd from io import BytesIO import zipfile import requests import sqlalchemy import psycopg2 schedule = CronSchedule( cron= '*/10 * * * *', start_date=pendulum.datetime(2020, 12, 5, 14, tz="America/Sao_Paulo") ) @task def get_raw_date(): url = 'http://download.inep.gov.br/microdados/Enade_Microdados/microdados_enade_2019.zip' filebytes = BytesIO(requests.get(url).content) zipped = zipfile.ZipFile(filebytes) zipped.extractall() return './microdados_enade_2019/2019/3.DADOS/' @task def apply_filters(path): interested_cols = ['CO_GRUPO', 'NU_IDADE', 'TP_SEXO', 'NT_GER', 'NT_FG', 'NT_CE', 'QE_I01', 'QE_I02', 'QE_I04', 'QE_I05', 'QE_I08'] df = pd.read_csv(path + 'microdados_enade_2019.txt', sep=';', decimal=',', usecols=interested_cols) df[(df.NU_IDADE > 20) & (df.NU_IDADE < 40) & (df.NT_GER > 0)] return df @task def get_mean_normalized_age(df): coppied_df = df.copy() coppied_df['mean_normalized_age'] = coppied_df.NU_IDADE - coppied_df.NU_IDADE.mean() return coppied_df[['mean_normalized_age']] @task def get_squared_mean_normalized_age(df): coppied_df = df.copy() coppied_df['squared_mean_normalized_age'] = coppied_df['mean_normalized_age'] ** 2 return coppied_df[['squared_mean_normalized_age']] @task def get_marital_status(df): coppied_df = df.copy() coppied_df['marital_status'] = coppied_df.QE_I01.replace({ 'A': 'SINGLE', 'B': 'MARRIED', 'C': 'DIVORCED', 'D': 'WIDOWED', 'E': 'OTHERS' }) return coppied_df[['marital_status']] @task def get_skin_color(df): coppied_df = df.copy() coppied_df['skin_color'] = coppied_df.QE_I01.replace({ 'A': 'WHITE', 'B': 'BLACK', 'C': 'YELLOW', 'D': 'BROWN', 'E': 'INDIGENOUS', 'F': '', ' ': '' }) return coppied_df[['skin_color']] @task def join_data(dfs): final = pd.concat(dfs, axis=1) logger = prefect.context.get('logger') logger.info(final.head(2).to_json()) return final @task def 
write_dw(df): engine = sqlalchemy.create_engine( 'postgresql://postgres:123456@localhost:5432/enade') df.to_sql('enade', con=engine, index=False, if_exists='replace', method='multi', chunksize=100000) with Flow('Enade', schedule) as flow: path = get_raw_date() df = apply_filters(path) normalized_mean_age = get_mean_normalized_age(df) normalized_squared_mean_age = get_squared_mean_normalized_age(normalized_mean_age) marital_status = get_marital_status(df) skin_color = get_skin_color(df) final = join_data([ df, normalized_mean_age, normalized_squared_mean_age, marital_status, skin_color ]) dw = write_dw(final) flow.register(project_name='IGTI', idempotency_key=flow.serialized_hash()) flow.run_agent(token='D_0wWDFgx0e67I2IIbf7Ew')
[ "you@example.com" ]
you@example.com
e50542f228d6759629bd4a34572c3eb303e86f74
3f1a6ad5c0c9015653206f45f5395d006b663a14
/Q Learning/MTM/ARVIND/Run 9/GlobalVariables.py
3e2d5879ffdeb20c3f0fd1e851beb570cc01e9b0
[]
no_license
ciddhijain/Results
075c9c51bf8bebefdf3ca87f66a50c9c39d8461e
a3ce350885264a6bf5c277bd811ad1a9931ced18
refs/heads/master
2021-01-10T19:23:17.764638
2015-07-03T14:51:48
2015-07-03T14:51:48
37,335,721
0
0
null
null
null
null
UTF-8
Python
false
false
3,572
py
__author__ = 'Ciddhi' from datetime import timedelta, datetime databaseName = 'QL_ARVIND' # This is database name to which connection is made userName = 'root' # This is the user name for database connection password = 'controljp' # This is the password for database connection dbHost = '127.0.0.1' # This is host address for database connection dbPort = '3306' # This is port for database connection dbConnector = 'mysqlconnector' # This is the connector string to be used, depending upon python package startDate = datetime(2014, 12, 1).date() # This is the start of training period endDate = datetime(2015, 4, 10).date() # This is the end of training period rankingDays = 15 # This is the number of days for which ranking is done initializationDays = 15 # This is the number of days for which q_matrix is initilaized liveDays = 15 # This is the number of days for which live trading is done logFileName = "QLearningARVIND" maxProcesses = 3 # This is the number of maximum processes #----------------------------------------------------------------------------------------------------------------------------------------- # These variables need to contain list values alpha = [0.2, 0.4, 0.5, 0.6, 0.8] # This defines the weightage to long trades as compared to short trades while constructing reward matrix gamma = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] # This defines the weightage of old data as compared to latest observations of reward matrix #maxGreedyLevel = [5] # This defines the number of future states for which reward is to be maximized in construction of q matrix #individualFactor = [8] # This defines the factor of total asset which is to be allocated to each strategy zeroRange = [0.002, 0.006, 0.01] # This determines the spread between states 0, 1, 2 factorLevelPairs = [[5, 12], [10, 12], [10, 8]] #------------------------------------------------------------------------------------------------------------------------------------------ dummyIndividualId = -1 # This is to 
keep a track of max total capital that is invested in the portfolio unitQty = 250000 # This is the amount of each decrement in asset hourWindow = 1 # This is the window after which re-allocation is done maxTotalAsset = 10000000 # This is the total asset deployed trainingFactor = 2 trainingMaxTotalAsset = maxTotalAsset*trainingFactor # This is the total asset deployed while training dummyPerformance = -50000 performanceMonthlyOutfileNameBase = 'arvind mtm monthly 15 15 15' performanceOutfileNameBase = 'arvind mtm 15 15 15' latestIndividualTableBase = "latest_individual_table" trainingTradesheetTableBase = "training_tradesheet_data_table" trainingAssetTableBase = "training_asset_allocation_table" rankingTableBase = "ranking_table" dailyAssetTableBase = "asset_daily_allocation_table" newTradesheetTableBase = "tradesheet_data_table" assetTableBase = "asset_allocation_table" qMatrixTableBase = "q_matrix_table" reallocationTableBase = "reallocation_table" performanceTableBase = "performance_table" rankingWalkforwardTableBase = "ranking_walkforward_table" dailyMtmTableBase = "daily_mtm_table"
[ "ciddhijain@gmail.com" ]
ciddhijain@gmail.com
cac0bf55441435485f57281a5edd5505e43adbd5
53db22afe7b2dc8344dd2ab0636644109708128f
/clab/nninit/__init__.py
f4854ab3143e64ac2d0a2683aa9e6858db9ca291
[ "Apache-2.0" ]
permissive
Erotemic/clab
5b5c11b2e4d39642f071c560e0e7caf3397c372d
89af79816d219cbecefefa209c0f6dc1fe390375
refs/heads/master
2018-10-09T18:04:31.762389
2018-08-13T14:23:38
2018-08-13T14:23:38
110,250,463
3
3
null
null
null
null
UTF-8
Python
false
false
747
py
""" python -c "import ubelt._internal as a; a.autogen_init('clab.nninit')" python -m clab """ # flake8: noqa from clab.nninit import base from clab.nninit import lsuv from clab.nninit.base import (HeNormal, KaimingNormal, KaimingUniform, NoOp, Orthogonal, Pretrained, VGG16, apply_initializer, constant, he_normal, he_uniform, init_he_normal, kaiming_normal, kaiming_uniform, load_partial_state, normal, orthogonal, shock, shock_he, sparse, trainable_layers, uniform, xavier_normal, xavier_uniform,) from clab.nninit.lsuv import (LSUV, Orthonormal, svd_orthonormal,)
[ "crallj@rpi.edu" ]
crallj@rpi.edu
9c198d82430f363233c68b084eddf9cf7586ae1e
d61183674ed7de0de626490cfba77d67c298d1be
/py_scripts/bench_plot_neighbors.py
54b896c2395f7e0598e4cd69e6dd4c51357c11ad
[]
no_license
Giannos-G/python_dataset
bc670a53143d92cf781e88dee608da38b0e63886
18e24cbef16ada1003a3e15a2ed2a3f995f25e46
refs/heads/main
2023-07-25T20:24:31.988271
2021-09-09T10:31:41
2021-09-09T10:31:41
363,489,911
2
0
null
null
null
null
UTF-8
Python
false
false
6,405
py
""" Plot the scaling of the nearest neighbors algorithms with k, D, and N """ from time import time import numpy as np import matplotlib.pyplot as plt from matplotlib import ticker from sklearn import neighbors, datasets def get_data(N, D, dataset='dense'): if dataset == 'dense': np.random.seed(0) return np.random.random((N, D)) elif dataset == 'digits': X, _ = datasets.load_digits(return_X_y=True) i = np.argsort(X[0])[::-1] X = X[:, i] return X[:N, :D] else: raise ValueError("invalid dataset: %s" % dataset) def barplot_neighbors(Nrange=2 ** np.arange(1, 11), Drange=2 ** np.arange(7), krange=2 ** np.arange(10), N=1000, D=64, k=5, leaf_size=30, dataset='digits'): algorithms = ('kd_tree', 'brute', 'ball_tree') fiducial_values = {'N': N, 'D': D, 'k': k} #------------------------------------------------------------ # varying N N_results_build = {alg: np.zeros(len(Nrange)) for alg in algorithms} N_results_query = {alg: np.zeros(len(Nrange)) for alg in algorithms} for i, NN in enumerate(Nrange): print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange))) X = get_data(NN, D, dataset) for algorithm in algorithms: nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k), algorithm=algorithm, leaf_size=leaf_size) t0 = time() nbrs.fit(X) t1 = time() nbrs.kneighbors(X) t2 = time() N_results_build[algorithm][i] = (t1 - t0) N_results_query[algorithm][i] = (t2 - t1) #------------------------------------------------------------ # varying D D_results_build = {alg: np.zeros(len(Drange)) for alg in algorithms} D_results_query = {alg: np.zeros(len(Drange)) for alg in algorithms} for i, DD in enumerate(Drange): print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange))) X = get_data(N, DD, dataset) for algorithm in algorithms: nbrs = neighbors.NearestNeighbors(n_neighbors=k, algorithm=algorithm, leaf_size=leaf_size) t0 = time() nbrs.fit(X) t1 = time() nbrs.kneighbors(X) t2 = time() D_results_build[algorithm][i] = (t1 - t0) D_results_query[algorithm][i] = (t2 - t1) 
#------------------------------------------------------------ # varying k k_results_build = {alg: np.zeros(len(krange)) for alg in algorithms} k_results_query = {alg: np.zeros(len(krange)) for alg in algorithms} X = get_data(N, DD, dataset) for i, kk in enumerate(krange): print("k = %i (%i out of %i)" % (kk, i + 1, len(krange))) for algorithm in algorithms: nbrs = neighbors.NearestNeighbors(n_neighbors=kk, algorithm=algorithm, leaf_size=leaf_size) t0 = time() nbrs.fit(X) t1 = time() nbrs.kneighbors(X) t2 = time() k_results_build[algorithm][i] = (t1 - t0) k_results_query[algorithm][i] = (t2 - t1) plt.figure(figsize=(8, 11)) for (sbplt, vals, quantity, build_time, query_time) in [(311, Nrange, 'N', N_results_build, N_results_query), (312, Drange, 'D', D_results_build, D_results_query), (313, krange, 'k', k_results_build, k_results_query)]: ax = plt.subplot(sbplt, yscale='log') plt.grid(True) tick_vals = [] tick_labels = [] bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg]))) for alg in algorithms]) for i, alg in enumerate(algorithms): xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals)) width = 0.8 c_bar = plt.bar(xvals, build_time[alg] - bottom, width, bottom, color='r') q_bar = plt.bar(xvals, query_time[alg], width, build_time[alg], color='b') tick_vals += list(xvals + 0.5 * width) tick_labels += ['%i' % val for val in vals] plt.text((i + 0.02) / len(algorithms), 0.98, alg, transform=ax.transAxes, ha='left', va='top', bbox=dict(facecolor='w', edgecolor='w', alpha=0.5)) plt.ylabel('Time (s)') ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals)) ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels)) for label in ax.get_xticklabels(): label.set_rotation(-90) label.set_fontsize(10) title_string = 'Varying %s' % quantity descr_string = '' for s in 'NDk': if s == quantity: pass else: descr_string += '%s = %i, ' % (s, fiducial_values[s]) descr_string = descr_string[:-2] plt.text(1.01, 0.5, title_string, transform=ax.transAxes, rotation=-90, 
ha='left', va='center', fontsize=20) plt.text(0.99, 0.5, descr_string, transform=ax.transAxes, rotation=-90, ha='right', va='center') plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16) plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'), 'upper right') if __name__ == '__main__': barplot_neighbors(dataset='digits') barplot_neighbors(dataset='dense') #plt.show()
[ "giannos.gavrielides@gmail.com" ]
giannos.gavrielides@gmail.com
fb518482a9258267dfecca4ed56546ae95d000cf
03b98276b252aa0e94142b65fd4b9dcd0a4d12ee
/Project.py
4d44581f8e3cc0aa66a52956f2641de9b4be4d85
[]
no_license
FrancescoSaverioZuppichini/scientists-keypoints-classification
b2c0ec30f5f7067d3809955f26a2f0cb051cdc2a
d1c4b6010e8c0f13271cd3304a137febf927c51d
refs/heads/main
2023-03-02T07:44:40.767193
2021-01-31T15:56:12
2021-01-31T15:56:12
334,368,509
0
0
null
2021-01-31T15:56:13
2021-01-30T08:49:52
Jupyter Notebook
UTF-8
Python
false
false
518
py
from dataclasses import dataclass from pathlib import Path @dataclass class Project: """ This class represents our project. It stores useful information about the structure, e.g. paths. """ base_dir: Path = Path(__file__).parents[0] data_dir: Path = base_dir / 'dataset' checkpoint_dir: Path = base_dir / 'checkpoint' def __post_init__(self): # create the directories if they don't exist self.data_dir.mkdir(exist_ok=True) self.checkpoint_dir.mkdir(exist_ok=True)
[ "zuppif@usi.ch" ]
zuppif@usi.ch
a56e5646913bda05dbd46183d6ccfa161a785312
7a0f7ce00528b103c7b8b501f1e8333fc3a0836c
/Class Based Views/CBV/CBV/CBV_app/urls.py
e9a5006b2991b23bdefa7672692ccb6ffcd53603
[]
no_license
StefanDimitrovDimitrov/DjangoRESTFramework
605ea044f4039d5bb8017ffe2f33c54ea0ebacc0
076cac204d2b5b1a278b68d3883394d5dcc2c040
refs/heads/main
2023-07-15T01:55:52.550274
2021-08-27T16:34:40
2021-08-27T16:34:40
382,106,004
0
0
null
null
null
null
UTF-8
Python
false
false
269
py
from django.urls import path from CBV.CBV_app.views import CBVList, Details, Create urlpatterns = [ path('', CBVList.as_view(), name='CBVList'), path('details/<int:pk>', Details.as_view(), name='details'), path('create', Create.as_view(), name='create') ]
[ "54206891+StefanDimitrovDimitrov@users.noreply.github.com" ]
54206891+StefanDimitrovDimitrov@users.noreply.github.com
e9441712b4248e5f710a43bc2f7edd338493dca2
781e2692049e87a4256320c76e82a19be257a05d
/all_data/exercism_data/python/bob/5250fabed22b4ae590e1fc2c10ff7469.py
1d05c10071c4c66d5b3f7de258699ffdc3b7ceb5
[]
no_license
itsolutionscorp/AutoStyle-Clustering
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
refs/heads/master
2020-12-11T07:27:19.291038
2016-03-16T03:18:00
2016-03-16T03:18:42
59,454,921
4
0
null
2016-05-23T05:40:56
2016-05-23T05:40:56
null
UTF-8
Python
false
false
465
py
# # Skeleton file for the Python "Bob" exercise. # def hey(what): #Strip whitespace what=what.strip() if (what==''): return 'Fine. Be that way!' elif (any(c.isalpha() for c in what) and what.upper()==what): #if the string contains letters, and all letters are uppercase (i.e. applying .upper() doesn't change the string) return 'Whoa, chill out!' elif (what[-1]=='?'): #if asked a question, return sure. return 'Sure.' else: return 'Whatever.'
[ "rrc@berkeley.edu" ]
rrc@berkeley.edu
9f455b8429dea02b5edeba4e91d0ff72aa48f3b9
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
/alipay/aop/api/response/AlipayDataDataserviceHellobikeLogSyncResponse.py
da91fbf326c81f03835b551c7221a1f8723c99fd
[ "Apache-2.0" ]
permissive
alipay/alipay-sdk-python-all
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
1fad300587c9e7e099747305ba9077d4cd7afde9
refs/heads/master
2023-08-27T21:35:01.778771
2023-08-23T07:12:26
2023-08-23T07:12:26
133,338,689
247
70
Apache-2.0
2023-04-25T04:54:02
2018-05-14T09:40:54
Python
UTF-8
Python
false
false
742
py
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.response.AlipayResponse import AlipayResponse class AlipayDataDataserviceHellobikeLogSyncResponse(AlipayResponse): def __init__(self): super(AlipayDataDataserviceHellobikeLogSyncResponse, self).__init__() self._biz_code = None @property def biz_code(self): return self._biz_code @biz_code.setter def biz_code(self, value): self._biz_code = value def parse_response_content(self, response_content): response = super(AlipayDataDataserviceHellobikeLogSyncResponse, self).parse_response_content(response_content) if 'biz_code' in response: self.biz_code = response['biz_code']
[ "liuqun.lq@alibaba-inc.com" ]
liuqun.lq@alibaba-inc.com
8a01fbab7ddd08d936daca4f2b151f92b88385c5
e9ff112a590a2707e66c518328ba71a4d964846a
/train_scripts/train_c2i.py
f980e7fb7c32ab9e357e2dc4a9e4acdc8522b720
[ "MIT" ]
permissive
n644t031/fastMRI-kspace
60a6ca9679ede25f0db89f174647a8451a578331
4c484b3183e9f06838b5ee108af283611c2e1e77
refs/heads/master
2022-08-30T17:19:23.105996
2020-05-24T13:55:40
2020-05-24T13:55:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,068
py
import torch from torch import nn, optim from pathlib import Path from utils.run_utils import initialize, save_dict_as_json, get_logger, create_arg_parser from utils.data_loaders import create_prefetch_data_loaders from train.subsample import RandomMaskFunc, UniformMaskFunc from data.input_transforms import PreProcessCMG from data.output_transforms import PostProcessCMG from train.new_model_trainers.img_only import ModelTrainerIMG # from models.att_unet import UNet from models.unet_no_norm import UNet from metrics.new_1d_ssim import SSIMLoss, LogSSIMLoss def train_cmg_to_img(args): # Creating checkpoint and logging directories, as well as the run name. ckpt_path = Path(args.ckpt_root) ckpt_path.mkdir(exist_ok=True) ckpt_path = ckpt_path / args.train_method ckpt_path.mkdir(exist_ok=True) run_number, run_name = initialize(ckpt_path) ckpt_path = ckpt_path / run_name ckpt_path.mkdir(exist_ok=True) log_path = Path(args.log_root) log_path.mkdir(exist_ok=True) log_path = log_path / args.train_method log_path.mkdir(exist_ok=True) log_path = log_path / run_name log_path.mkdir(exist_ok=True) logger = get_logger(name=__name__) # Assignment inside running code appears to work. if (args.gpu is not None) and torch.cuda.is_available(): device = torch.device(f'cuda:{args.gpu}') logger.info(f'Using GPU {args.gpu} for {run_name}') else: device = torch.device('cpu') logger.info(f'Using CPU for {run_name}') # Saving peripheral variables and objects in args to reduce clutter and make the structure flexible. 
args.run_number = run_number args.run_name = run_name args.ckpt_path = ckpt_path args.log_path = log_path args.device = device save_dict_as_json(vars(args), log_dir=log_path, save_name=run_name) if args.random_sampling: mask_func = RandomMaskFunc(args.center_fractions, args.accelerations) else: mask_func = UniformMaskFunc(args.center_fractions, args.accelerations) input_train_transform = PreProcessCMG(mask_func, args.challenge, device, augment_data=args.augment_data, use_seed=False, crop_center=args.crop_center) input_val_transform = PreProcessCMG(mask_func, args.challenge, device, augment_data=False, use_seed=True, crop_center=args.crop_center) output_train_transform = PostProcessCMG(challenge=args.challenge, residual_acs=args.residual_acs) output_val_transform = PostProcessCMG(challenge=args.challenge, residual_acs=args.residual_acs) # DataLoaders train_loader, val_loader = create_prefetch_data_loaders(args) losses = dict( img_loss=LogSSIMLoss(filter_size=7).to(device) # img_loss=SSIMLoss(filter_size=7).to(device=device) # img_loss=nn.L1Loss() ) # model = UNet( # in_chans=30, out_chans=30, chans=args.chans, num_pool_layers=args.num_pool_layers, num_groups=args.num_groups, # negative_slope=args.negative_slope, use_residual=args.use_residual, interp_mode=args.interp_mode, # use_ca=args.use_ca, reduction=args.reduction, use_gap=args.use_gap, use_gmp=args.use_gmp).to(device) model = UNet(in_chans=30, out_chans=30, chans=args.chans, num_pool_layers=args.num_pool_layers, num_depth_blocks=args.num_depth_blocks, use_residual=args.use_residual, use_ca=args.use_ca, reduction=args.reduction, use_gap=args.use_gap, use_gmp=args.use_gmp).to(device) optimizer = optim.Adam(model.parameters(), lr=args.init_lr) scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_red_epochs, gamma=args.lr_red_rate) trainer = ModelTrainerIMG(args, model, optimizer, train_loader, val_loader, input_train_transform, input_val_transform, output_train_transform, output_val_transform, 
losses, scheduler) try: trainer.train_model() except KeyboardInterrupt: trainer.writer.close() logger.warning('Closing summary writer due to KeyboardInterrupt.') if __name__ == '__main__': project_name = 'fastMRI-kspace' assert Path.cwd().name == project_name, f'Current working directory set at {Path.cwd()}, not {project_name}!' settings = dict( # Variables that almost never change. challenge='multicoil', data_root='/media/veritas/D/FastMRI', log_root='./logs', ckpt_root='./checkpoints', batch_size=1, # This MUST be 1 for now. save_best_only=True, smoothing_factor=8, # Variables that occasionally change. center_fractions=[0.08, 0.04], accelerations=[4, 8], random_sampling=True, num_pool_layers=5, verbose=False, use_gt=True, augment_data=True, crop_center=True, # Model specific parameters. train_method='C2I', # Weighted semi-k-space to complex-valued image. # num_groups=16, # Maybe try 16 now since chans is 64. chans=64, num_depth_blocks=1, # negative_slope=0.1, # interp_mode='nearest', use_residual=True, residual_acs=False, # TensorBoard related parameters. max_images=8, # Maximum number of images to save. shrink_scale=1, # Scale to shrink output image size. # Channel Attention. use_ca=False, reduction=8, use_gap=False, use_gmp=False, # Learning rate scheduling. lr_red_epochs=[20, 25], lr_red_rate=0.1, # Variables that change frequently. use_slice_metrics=True, num_epochs=30, gpu=0, # Set to None for CPU mode. num_workers=4, init_lr=2E-4, max_to_keep=1, prev_model_ckpt='', sample_rate_train=1, start_slice_train=0, sample_rate_val=1, start_slice_val=0, ) arguments = create_arg_parser(**settings).parse_args() train_cmg_to_img(arguments)
[ "veritas9872@gmail.com" ]
veritas9872@gmail.com
2c562bda934b4f62ab5ded428de590c8313dcc52
0b9e884be78ecc22a44a94e2c1cabefd637b9ed0
/Python_Talk/mpt-master/ch12/guess_letter.py
7c0efa9b99992a2bcda705aca8885edea8980335
[]
no_license
marcovnyc/penguin-code
6ba3faa5f21186918e2d08f5a0fcacebb2697e56
a0c1f91219ff74a8bb8e9fd3375b03b667056b54
refs/heads/master
2021-12-22T15:04:26.002512
2021-12-16T04:01:40
2021-12-16T04:01:40
7,264,458
0
0
null
null
null
null
UTF-8
Python
false
false
1,798
py
import turtle as t from random import choice # Set up the board t.setup(600,500) t.hideturtle() t.tracer(False) t.bgcolor("lavender") t.title("Guess the Word Game in Turtle Graphics") # Define a variable to count how many guesses left score = 6 # Create a second turtle to show guesses left left = t.Turtle() left.up() left.hideturtle() left.goto(-290,200) left.write(f"guesses left: {score}", font = ('Arial',20,'normal')) # Put incorrect guesses on top t.up() t.goto(-290,150) t.write("incorrect guesses:", font = ('Arial',20,'normal')) # Put four empty spaces for the four letters at bottom for x in range(4): t.goto(-275+150*x,-200) t.down() t.goto(-175+150*x,-200) t.up() t.update() # Put words in a dictionary and randomly pick one words = ['that', 'with', 'have', 'this', 'will', 'your', 'from', 'they', 'know', 'want', 'been', 'good', 'much', 'some', 'time'] word = choice(words) # Create a missed list missed = [] # Start the game loop while True: # Take written input inp = input("What's your guess?\n").lower() # Stop the loop if you key in "done" if inp == "done": break # Check if the letter is in the word elif inp in list(word): # If yes, put it in the right position(s) for w in range(4): if inp == list(word)[w]: t.goto(-250+150*w,-190) t.write(inp, font = ('Arial',60,'normal')) # If the letter is not in the word, show it at the top else: missed.append(inp) t.goto(-290+80*len(missed),60) t.write(inp, font = ('Arial',60,'normal')) # Update everything that happens in the iteration t.update() try: t.bye() except t.Terminator: print('exit turtle')
[ "penguin@penguin.com" ]
penguin@penguin.com
ee7d13e00cccdc88d49ad555a163e514fa7a1276
aad51b0ea59c38b23ed419e10b86c44aa947f117
/156/rrhood.py
cec768264e83a157a3edbe67b33fa6193bb99896
[]
no_license
berubejd/PyBites
3a1d7144f59f67a0996dbe224b69bc0b6da439d6
439446e8b67612a603713723b2d4a021677341d2
refs/heads/master
2021-07-14T14:03:51.819347
2020-11-01T14:59:02
2020-11-01T14:59:02
221,087,519
0
0
null
null
null
null
UTF-8
Python
false
false
5,346
py
#!/usr/bin/env python3.8 CHARACTERS = ['Red Riding Hood', # we're omitting 'mother' here for simplicity # (= substring grandmother) ('Grandmother', 'Grandma', 'Granny'), 'wolf', 'woodsman'] text = """ Once upon a time, there was a little girl who lived in a village near the forest. Whenever she went out, the little girl wore a red riding cloak, so everyone in the village called her Little Red Riding Hood. One morning, Little Red Riding Hood asked her mother if she could go to visit her grandmother as it had been awhile since they'd seen each other. "That's a good idea," her mother said. So they packed a nice basket for Little Red Riding Hood to take to her grandmother. When the basket was ready, the little girl put on her red cloak and kissed her mother goodbye. "Remember, go straight to Grandma's house," her mother cautioned. "Don't dawdle along the way and please don't talk to strangers! The woods are dangerous." "Don't worry, mommy," said Little Red Riding Hood, "I'll be careful." But when Little Red Riding Hood noticed some lovely flowers in the woods, she forgot her promise to her mother. She picked a few, watched the butterflies flit about for awhile, listened to the frogs croaking and then picked a few more. Little Red Riding Hood was enjoying the warm summer day so much, that she didn't notice a dark shadow approaching out of the forest behind her... Suddenly, the wolf appeared beside her. "What are you doing out here, little girl?" the wolf asked in a voice as friendly as he could muster. "I'm on my way to see my Grandma who lives through the forest, near the brook," Little Red Riding Hood replied. Then she realized how late she was and quickly excused herself, rushing down the path to her Grandma's house. The wolf, in the meantime, took a shortcut... The wolf, a little out of breath from running, arrived at Grandma's and knocked lightly at the door. "Oh thank goodness dear! Come in, come in! 
I was worried sick that something had happened to you in the forest," said Grandma thinking that the knock was her granddaughter. The wolf let himself in. Poor Granny did not have time to say another word, before the wolf gobbled her up! The wolf let out a satisfied burp, and then poked through Granny's wardrobe to find a nightgown that he liked. He added a frilly sleeping cap, and for good measure, dabbed some of Granny's perfume behind his pointy ears. A few minutes later, Red Riding Hood knocked on the door. The wolf jumped into bed and pulled the covers over his nose. "Who is it?" he called in a cackly voice. "It's me, Little Red Riding Hood." "Oh how lovely! Do come in, my dear," croaked the wolf. When Little Red Riding Hood entered the little cottage, she could scarcely recognize her Grandmother. "Grandmother! Your voice sounds so odd. Is something the matter?" she asked. "Oh, I just have touch of a cold," squeaked the wolf adding a cough at the end to prove the point. "But Grandmother! What big ears you have," said Little Red Riding Hood as she edged closer to the bed. "The better to hear you with, my dear," replied the wolf. "But Grandmother! What big eyes you have," said Little Red Riding Hood. "The better to see you with, my dear," replied the wolf. "But Grandmother! What big teeth you have," said Little Red Riding Hood her voice quivering slightly. "The better to eat you with, my dear," roared the wolf and he leapt out of the bed and began to chase the little girl. Almost too late, Little Red Riding Hood realized that the person in the bed was not her Grandmother, but a hungry wolf. She ran across the room and through the door, shouting, "Help! Wolf!" as loudly as she could. A woodsman who was chopping logs nearby heard her cry and ran towards the cottage as fast as he could. He grabbed the wolf and made him spit out the poor Grandmother who was a bit frazzled by the whole experience, but still in one piece."Oh Grandma, I was so scared!" 
sobbed Little Red Riding Hood, "I'll never speak to strangers or dawdle in the forest again." "There, there, child. You've learned an important lesson. Thank goodness you shouted loud enough for this kind woodsman to hear you!" The woodsman knocked out the wolf and carried him deep into the forest where he wouldn't bother people any longer. Little Red Riding Hood and her Grandmother had a nice lunch and a long chat. """ def make_character_index(text=text, characters=CHARACTERS): """Return a dict with keys are characters (lowercased) and values the lines they appear in sorted order. Matches should be case insensitive. If a character has multiple synonyms - e.g. ('Grandmother', 'Grandma', 'Granny') - then return the former as key. """ from collections import defaultdict index = defaultdict(list) line_number = 0 for line in text.lower().strip().splitlines(): line_number += 1 for character in characters: if isinstance(character, str): character = (character,) for syn in character: if syn.lower() in line: if not line_number in index[character[0].lower()]: index[character[0].lower()].append(line_number) return index print(make_character_index())
[ "berubejd@gmail.com" ]
berubejd@gmail.com
59ab9c4e25ae4f4d32c497ff085285a02ce8eae2
5b86d752424e303a5115ded25892776d3781bddf
/tools/forecast_data_select.py
c4ceec80ef4eccac6ff982ff5fac063d8bf44513
[]
no_license
GlassyWing/weather_predict_torch
3318e441768ebbaaac408cdd3c48133f7bc03495
0cbd0af4b787bb8624d65b9e31970161e29d05a2
refs/heads/master
2020-05-17T08:04:28.473337
2019-05-04T13:54:27
2019-05-04T13:54:27
183,597,527
0
0
null
null
null
null
UTF-8
Python
false
false
274
py
import pandas as pd if __name__ == '__main__': weather = pd.read_csv("../data/weather.csv") weather = weather[(weather['year'] == 2017) & (weather['month'] == 5) & (weather['county'] == '仁和')][::-1] weather.to_csv("../data/forecast_test.csv", index=False)
[ "1490215053@qq.com" ]
1490215053@qq.com
beb9bd4c2cd0d682ed67c40ad31749fdf94ea87d
432ea480327c3e0ce37d605d1c4ac29a8b653853
/src/visions/backends/python/types/file.py
2886a3bce3435a710a17540736818199fafd230b
[ "BSD-4-Clause", "BSD-2-Clause" ]
permissive
dylan-profiler/visions
3f7f99b06cc8a7b90cb4df988dbbec6c329a8e0a
a0b55bbf95e6efe001195e4b497358d6283966b5
refs/heads/develop
2022-11-27T01:17:01.735418
2022-10-30T10:44:37
2022-10-30T10:44:37
227,633,867
188
23
NOASSERTION
2022-10-05T23:06:31
2019-12-12T15:09:01
Python
UTF-8
Python
false
false
246
py
import pathlib from typing import Sequence from visions.types.file import File @File.contains_op.register def file_contains(sequence: Sequence, state: dict) -> bool: return all(isinstance(p, pathlib.Path) and p.exists() for p in sequence)
[ "sfbbrugman@gmail.com" ]
sfbbrugman@gmail.com
4b6960d22383d7c69b3c5e10c4b64ac23a1c83e8
230553326780c93f60d552a95d50018025724b4b
/py-code/迭代器.py
0fa3e53eaa24b154fab2aaba25a75e2ac9777164
[]
no_license
g-lyc/PRACTICE
55108dbeb75893e4e6631ce3968420f1da0266ef
492f72a1c14b53982ada478890e6a5487a67c46e
refs/heads/master
2022-08-21T17:33:29.682865
2022-08-15T02:58:17
2022-08-15T02:58:17
51,586,693
0
0
null
null
null
null
UTF-8
Python
false
false
553
py
#coding:utf-8 import os import sys # 生成器都是迭代器,迭代器不一定是生成器 l = [1,2,3,4,5] #可迭代对象,不是迭代器 d = iter(l) # l.__iter__() print(d) #<listiterator object at 0x0000000004DA5518> #什么是迭代器? #满足两个条件 1、有iter()方法 2、有next()方法 # for 循环内部三件事: # 1、 调用可迭代对象的iter方法返回一个迭代器对象 # 2、 不断调用迭代器对象的next方法 # 3、 处理StopIteration # Iterator 迭代器 # Iterable 可迭代对象
[ "309080979@qq.com" ]
309080979@qq.com
2b27287a81ce34c6bdfd3b3077d0ad39cac86b10
1f4204f903657884d9cccfd44b19ecb531b59ded
/test_settings/66_1_3_200.py
c083982ec3213706b72bcdccb9b5721887a8d8a3
[]
no_license
fmcc/StylometricAnalyser
795a8e4abe264ee18ab3bcb34bd128bcd06ac5ca
e86305a63c95d8b533cab4a3be0010c2fee0ff14
refs/heads/master
2021-01-23T08:38:44.961082
2013-08-31T20:23:36
2013-08-31T20:23:36
11,097,508
3
1
null
null
null
null
UTF-8
Python
false
false
540
py
import os DB_PATH = os.getcwd() + '/database/greek_texts.db' LOGGING = True DB_LOGGING = False NGRAM_WORDS = False NGRAM_LENGTHS = { 'MIN': 1, 'MAX': 3 } NO_SPACES = True RESTRICT_VECTOR_SPACE = 200 # If selected, texts will be divided according to their original top-level divisions (Books etc. 'div1' in Perseus's TEI XML USE_ORIGINAL_DIVISIONS = False #If USE_ORIGINAL_DIVISIONS is False, the text will be divided into chunks the length defined here. If O the text will not be divided. DIVISION_LENGTH = 5000
[ "finlaymccourt@gmail.com" ]
finlaymccourt@gmail.com
db000f476282e2536b72f3be77dc90f773225eb7
83544ef94ce2c1a05b6028ae2ce58ef8acfb6fa8
/pmca-console.spec
78c3eff77d7b1c5610639372fbb7074d05cd63eb
[ "MIT" ]
permissive
ma1co/Sony-PMCA-RE
9ae44c5b09580d62e860c3acff24bd1fac28a31e
a82f5baaa8e9c3d9f28f94699e860fb2e48cc8e0
refs/heads/master
2023-08-07T07:54:13.763912
2022-08-18T12:46:04
2022-08-18T12:46:04
35,510,548
1,788
228
MIT
2022-11-05T06:45:01
2015-05-12T20:18:25
Python
UTF-8
Python
false
false
177
spec
# Run `pyinstaller pmca-console.spec` to generate an executable input = 'pmca-console.py' output = 'pmca-console' console = True with open('build.spec') as f: exec(f.read())
[ "ma1co@users.noreply.github.com" ]
ma1co@users.noreply.github.com
20ed4c2c7261e9b4442d61d4810acd5d0b5743e6
385ab972316b41cb0643f1050f9220b8eaeb4647
/findDigits.py
41ec66dee85addf385ce1e7b73749ac75dcd3cb8
[]
no_license
Rpratik13/HackerRank
09174c9b331e25cec33848a80e9109800cdbc894
38b9a39261bfb3b2fc208ad1e3d8a485585b419a
refs/heads/master
2020-03-22T05:24:03.516086
2020-01-31T16:08:19
2020-01-31T16:08:19
139,563,106
0
0
null
null
null
null
UTF-8
Python
false
false
274
py
def findDigits(n): temp = n count = 0 while temp!=0: d = temp%10 if d == 0: temp = temp//10 continue if n%d==0: count+=1 temp = temp//10 return count t = int(input()) for t_itr in range(t): n = int(input()) result = findDigits(n) print(result)
[ "r.pratik013@gmail.com" ]
r.pratik013@gmail.com
a277c31b1f5861dc5d0a8cf2c169ac2f24ea9e6b
2c143ba64032f65c7f7bf1cbd567a1dcf13d5bb1
/树/验证二叉树.py
52f08e37e653a6186347565b6ffa4d6440299a4a
[]
no_license
tx991020/MyLeetcode
5b6121d32260fb30b12cc8146e44e6c6da03ad89
cfe4f087dfeb258caebbc29fc366570ac170a68c
refs/heads/master
2020-04-09T21:43:41.403553
2019-03-27T18:54:35
2019-03-27T18:54:35
160,611,089
0
0
null
null
null
null
UTF-8
Python
false
false
1,076
py
''' 给定一个二叉树,判断其是否是一个有效的二叉搜索树。 假设一个二叉搜索树具有如下特征: 节点的左子树只包含小于当前节点的数。 节点的右子树只包含大于当前节点的数。 所有左子树和右子树自身必须也是二叉搜索树。 示例 1: 输入: 2 / \ 1 3 输出: true 示例 2: 输入: 5 / \ 1 4 / \ 3 6 输出: false 解释: 输入为: [5,1,4,null,null,3,6]。 根节点的值为 5 ,但是其右子节点值为 4 。 ''' # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def isValidBST(self, root): """ :type root: TreeNode :rtype: bool """ inorder = self.inorder(root) return inorder == list(sorted(set(inorder))) def inorder(self, root): if root is None: return [] return self.inorder(root.left) + [root.val] + self.inorder(root.right)
[ "wudi@hetao101.com" ]
wudi@hetao101.com
60662f03bae36268062d3d500b1a75f366f385cd
91824d746654fe12881b4fc3b55c553aae0d22ac
/py/fizz-buzz.py
94d8d1eecae70b8c48ce696fb08d2f16527ea5db
[ "Apache-2.0" ]
permissive
ckclark/leetcode
a1a173c67a36a3256b198f853fcd3d15aa5abbb7
844c6f18d06dcb397db76436e5f4b8ddcb1beddc
refs/heads/master
2021-01-15T08:14:43.368516
2020-02-14T07:25:05
2020-02-14T07:30:10
42,386,911
0
0
null
null
null
null
UTF-8
Python
false
false
427
py
import sys class Solution(object): def fizzBuzz(self, n): """ :type n: int :rtype: List[str] """ ret = [] for i in xrange(1, n + 1): s = '' if i % 3 == 0: s += 'Fizz' if i % 5 == 0: s += 'Buzz' if i % 3 > 0 and i % 5 > 0: s += str(i) ret.append(s) return ret
[ "clark.ck@gmail.com" ]
clark.ck@gmail.com
40018adf13b332b79d6fb3be3827cb089212a630
610dedfb6e21d297e8cdbcba599a4e564bd785cb
/EstruturaDeRepeticao/estruturaderepeticao-29.py
5e9e0dce39884add04858ba5c6bdaa07905a59e9
[]
no_license
zumbipy/PythonExercicios
f7b2ddf2376b9ecb2aedc77531e3571dc746a12b
7a17b78cf927a2889b93238542e90e00810c43e6
refs/heads/master
2021-01-23T10:43:47.997462
2018-07-22T14:58:44
2018-07-22T14:58:44
93,086,120
1
1
null
null
null
null
UTF-8
Python
false
false
1,182
py
# Telegram: @ZumbiPy __ _ ___ # /_ / __ ____ _ / / (_) _ \__ __ # / /_/ // / ' \/ _ \/ / ___/ // / # /___/\_,_/_/_/_/_.__/_/_/ \_, / # E-mail: zumbipy@gmail.com /___/ """ 29 - O Sr. Manoel Joaquim possui uma grande loja de artigos de R$ 1,99, com cerca de 10 caixas. Para agilizar o cálculo de quanto cada cliente deve pagar ele desenvolveu um tabela que contém o número de itens que o cliente comprou e ao lado o valor da conta. Desta forma a atendente do caixa precisa apenas contar quantos itens o cliente está levando e olhar na tabela de preços. Você foi contratado para desenvolver o programa que monta esta tabela de preços, que conterá os preços de 1 até 50 produtos, conforme o exemplo abaixo: Lojas Quase Dois - Tabela de preços 1 - R$ 1.99 2 - R$ 3.98 ... 50 - R$ 99.50 """ # ================================================================================ # Logica e variavel. # ================================================================================ print("Lojas Quase Dois - Tabela de preços") for produtos in range(1, 51): valor = 1.99 * produtos print("{:>2} - R$ {:.2f}".format(produtos, valor))
[ "zumbipy@gmail.com" ]
zumbipy@gmail.com
ae3a0a0301704c185845a2ce5ed9681188f3d08e
9dee94907e6456a4af9855d358693923c17b4e0d
/0015_3Sum.py
159dafa89798c41df6a1700328fa7950dc93e7eb
[]
no_license
chien-wei/LeetCode
e215915a8103e56f182040dacc9fb0d6996c86ec
0d6f414e7610fedb2ec4818ecf88d51aa69e1355
refs/heads/master
2021-05-13T14:48:22.891100
2019-08-20T05:52:59
2019-08-20T05:52:59
116,749,327
0
0
null
null
null
null
UTF-8
Python
false
false
1,277
py
class Solution: def threeSum(self, nums): """ :type nums: List[int] :rtype: List[List[int]] """ res = [] nums.sort() self.nSum(nums, 0, 3, [], res) return res def nSum(self, nums, target, n, result, results): #print(nums, target, n, result, results) if len(nums) < n or n < 2: return if n == 2: l, r = 0, len(nums)-1 while r > l: if nums[l] + nums[r] == target: results.append(result + [nums[l], nums[r]]) l += 1 r -= 1 while r > l and nums[l] == nums[l-1]: l += 1 while r > l and nums[r] == nums[r+1]: r -= 1 elif nums[l] + nums[r] < target: l += 1 else: r -= 1 else: for i in range(len(nums)-n+1): if target < nums[i] * n or target > nums[-1] * n: break if i == 0 or i > 0 and nums[i-1] != nums[i]: self.nSum(nums[i+1:], target - nums[i], n-1, result+[nums[i]], results) return
[ "chien-wei@outlook.com" ]
chien-wei@outlook.com
d2029e11c506652cd6919211d3bae4de432c9fb8
ba2a05f20454bda428f140634bc602699f164fc4
/00.SSAFY/1.first-semester/07_django/API/api/settings.py
871671aa9e43287bfe37e62f0f3a9b4c9fa4751f
[]
no_license
snowink1137/TIL
734da402e99afa52f1af4ef996a6b274b1bcce0b
9e9c78eb0c892affc88e2d46e143cef98af743fb
refs/heads/master
2023-01-08T18:26:34.311579
2021-11-14T11:04:22
2021-11-14T11:04:22
162,255,934
0
0
null
2023-01-07T11:09:09
2018-12-18T08:32:44
Jupyter Notebook
UTF-8
Python
false
false
3,183
py
""" Django settings for api project. Generated by 'django-admin startproject' using Django 2.1.7. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'o+kn&$uu)urs-*1*6z_bj5#_q1ps$uws7fi$nx*asvmnv)!sbd' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_extensions', 'rest_framework', 'rest_framework_swagger', 'movie_api', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'api.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'api.wsgi.application' # Database # https://docs.djangoproject.com/en/2.1/ref/settings/#databases DATABASES = { 
'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'ko-kr' TIME_ZONE = 'Asia/Seoul' USE_I18N = True USE_L10N = True USE_TZ = False # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_URL = '/static/'
[ "snowink1137@gmail.com" ]
snowink1137@gmail.com