blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d88e83ce61f5c1cfefddc7edc3506b16a9221549 | d8d0de926ac1a1de1abfca06aaed843d59f3674a | /python/easy/Solution_53.py | 3823a1f61d12b196772daf89164680284c13e3b6 | [] | no_license | nickest14/Leetcode-python | 75b4919d1ac45c6a7b008e6336db38a06e337bc7 | 435deadfe2b3936dd7848a384d2d9a364352268c | refs/heads/master | 2023-08-31T07:23:14.595745 | 2023-08-30T13:52:10 | 2023-08-30T13:52:10 | 188,695,066 | 0 | 0 | null | 2019-07-20T12:29:09 | 2019-05-26T14:43:12 | Python | UTF-8 | Python | false | false | 643 | py | # 53. Maximum Subarray
from typing import List
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum of any contiguous subarray (Kadane's algorithm).

        Assumes nums is non-empty, per the problem statement.
        """
        best = nums[0]
        running = 0
        for value in nums:
            running += value
            if running > best:
                best = running
            # A negative running sum can only hurt any extension; restart here.
            if running < 0:
                running = 0
        return best
# Smoke test: the best subarray is [4, -1, 2, 1], so this prints 6.
ans = Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4])
print(ans)
| [
"nickest14@gmail.com"
] | nickest14@gmail.com |
d8f4e1dab4f396180784cd27edd3f97818f00ff4 | 275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc | /test/test_agreement.py | a23d4faa65adb95e44786545cc1594e00cee38cd | [] | no_license | cascadiarc/cyclos-python-client | 8029ce07174f2fe92350a92dda9a60976b2bb6c2 | a2e22a30e22944587293d51be2b8268bce808d70 | refs/heads/main | 2023-04-03T16:52:01.618444 | 2021-04-04T00:00:52 | 2021-04-04T00:00:52 | 354,419,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | # coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.agreement import Agreement # noqa: E501
from swagger_client.rest import ApiException
class TestAgreement(unittest.TestCase):
    """Generated unit-test stubs for the Agreement model (no assertions yet)."""
    def setUp(self):
        # The generated stub needs no fixtures.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testAgreement(self):
        """Test Agreement"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.agreement.Agreement() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"dan@leftcoastfs.com"
] | dan@leftcoastfs.com |
33daa2f6e6384682405e79bf9780044ac51c8720 | d914604923dffab7a612b95f7e1b7ebcab0e9bb3 | /insta/migrations/0001_initial.py | 86409c50e4e9b1efd73aabc3b1f744ec7c895782 | [] | no_license | MaryMbugua/Insta | 8ca4adaa356d249a0d945cec653cf70fd2fe1589 | a238cda86616cbfc26e95611ad63b7a42011ffc0 | refs/heads/master | 2020-03-18T03:27:33.141311 | 2018-05-23T16:05:06 | 2018-05-23T16:05:06 | 134,241,994 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,618 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-23 06:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the insta app.

    Creates Comment, Image and Profile, then adds the foreign keys in
    separate AddField operations (the models reference one another).
    """

    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pic', models.ImageField(blank=True, upload_to='images/')),
                ('caption', models.CharField(max_length=60, null=True)),
                ('likes', models.ManyToManyField(blank=True, related_name='likes', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile_pic', models.ImageField(blank=True, upload_to='avatar/')),
                ('bio', tinymce.models.HTMLField()),
                ('first_name', models.CharField(max_length=30, null=True)),
                ('last_name', models.CharField(max_length=30, null=True)),
                ('following', models.ManyToManyField(blank=True, related_name='followed_by', to=settings.AUTH_USER_MODEL)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Deferred foreign keys, added once all three models exist.
        migrations.AddField(
            model_name='image',
            name='profile',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='insta.Profile'),
        ),
        migrations.AddField(
            model_name='comment',
            name='image',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='insta.Image'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"marymbugua.nm@gmail.com"
] | marymbugua.nm@gmail.com |
d593edb29f5801f87b35e1ea86e83429d51735a0 | 215fd5c4f9893d9f38e4e48199ea16d7d6ef9430 | /9.Dynmaic_Programming/9.6_L91_Decode_Ways.py | 8f21ead3257f41860d9cc46e3d11e2e5d4e81dbb | [] | no_license | fztest/Classified | fd01622c097ca21b2e20285b06997ff0e9792dd1 | b046d94657c0d04f3803ca15437dfe9a6f6f3252 | refs/heads/master | 2020-03-25T06:34:07.885108 | 2017-05-04T17:22:36 | 2017-05-04T17:22:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | """
Description
______________
A message containing letters from A-Z is being encoded to numbers using the following mapping:
'A' -> 1
'B' -> 2
...
'Z' -> 26
Given an encoded message containing digits, determine the total number of ways to decode it.
For example,
Given encoded message "12", it could be decoded as "AB" (1 2) or "L" (12).
The number of ways decoding "12" is 2.
"""
class Solution:
    # @param s, a string
    # @return an integer
    def numDecodings(self, s):
        """Count decodings of a digit string where 'A' -> 1 ... 'Z' -> 26.

        Bug fix: the original used Python-2-only `xrange`, which raises
        NameError under Python 3; `range` is semantically identical here.
        """
        if not s:
            return 0
        # ways[i] = number of decodings of the prefix s[:i+1]
        ways = [1 if s[0] != '0' else 0]
        for idx in range(1, len(s)):
            count = 0
            # The single digit s[idx] decodes on its own unless it is '0'.
            if s[idx] != '0':
                count += ways[idx - 1]
            # The two-digit number s[idx-1:idx+1] decodes iff it is 10..26.
            pair = int(s[idx - 1:idx + 1])
            if 10 <= pair <= 26:
                count += ways[idx - 2] if idx >= 2 else 1
            ways.append(count)
        return ways[-1]
| [
"cdzengpeiyun@gmail.com"
] | cdzengpeiyun@gmail.com |
c9ee749e1278aca262b9a42c087dbe576822bd2e | bfaf89bdb222b5b1f31aa4ef2a6466ca0125e225 | /students/view/logs.py | 1ebabdab75d4327d83f3c4782c224b271a81e1d8 | [] | no_license | smolynets/studentsdb13 | ab70506571150745753f04923c1a62457368ee03 | a76f9afa62ac6176a2f4dcea0098b6dd3986c3fc | refs/heads/master | 2021-01-17T22:03:14.145146 | 2017-03-07T08:33:38 | 2017-03-07T08:33:38 | 84,186,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from ..models.monthjournal import logentry
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from datetime import datetime
from django.contrib import messages
from datetime import datetime
from django.contrib.auth.decorators import login_required
@login_required
def logs(request):
logs = logentry.objects.order_by('asctime').reverse()
# paginate logs
paginator = Paginator(logs, 5)
page = request.GET.get('page')
try:
logs = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
logs = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver
# last page of results.
logs = paginator.page(paginator.num_pages)
return render(request, 'students/logs.html',
{'logs': logs}) | [
"smolynets@gmail.com"
] | smolynets@gmail.com |
b363b9ba1433e55ffe49fdb19adb2c5bade1ef05 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5738606668808192_0/Python/macieck9/c.py | 272d253ddc792afadcf3d720f6bab5d5693a2d4b | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | def to_bin(n):
l = []
while n != 0:
l.append(n % 2)
n /= 2
l.reverse()
res = 0
for d in l:
res = 10 * res + d
return res
def to_dec(n, b):
    # Interpret the decimal digits of n as a numeral written in base b.
    value = 0
    for digit_char in str(n):
        value = value * b + int(digit_char)
    return value
def divisor(n):
    """Return the smallest prime factor of n found in the precomputed global
    `primes` list, or -1 if none divides n.

    NOTE(review): when n is composite but its smallest factor exceeds every
    precomputed prime, the loop exhausts and this also returns -1; callers
    treat that like "no witness found" and simply skip the candidate.
    """
    for p in primes:
        if n % p == 0:
            return p
        # All remaining primes are > sqrt(n), so n has no factor in the list.
        if p * p > n:
            return -1
    return -1
# Sieve of Eratosthenes over [2, N]: T[i] == 1 marks composites.
N = 1<<17
T = [0] * (N + 5)
primes = []
i = 2
while i*i <= N:
    if T[i] == 1:
        i += 1
        continue
    for j in range(i * i, N + 1, i):
        T[j] = 1
    i += 1
for i in range(2, N + 1):
    if T[i] == 0:
        primes.append(i)
# Search: 16-bit odd patterns starting/ending in 1, written out as 0/1
# digit strings; a pattern qualifies when its value in every base 2..10
# has a small prime witness divisor.  Stop after 50 results.
ans = []
for i in range((1<<15) + 1, 1<<16, 2):
    n = to_bin(i)
    div = []
    for j in range(2, 11):
        x = to_dec(n, j)
        d = divisor(x)
        if d == -1:
            break
        div.append(d)
    if len(div) == 9:
        ans.append((n, div))
    if len(ans) == 50:
        break
# Python 2 output: each pattern followed by one witness divisor per base.
print "Case #1:"
for (n, l) in ans:
    print n,
    for x in l:
        print x,
    print
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
0901a8decebfd2e16adbeb8a5654d44c7e9cc093 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/예외처리_20200709152901.py | 0f0b02c3f92435483b13b818496c0572d693295e | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | try:
print("나누기 전용 계산기입니다.")
nums = []
nums.append(int(input("첫 번째 숫자를 입력하세요 : ")))
nums.append(int(input("두 번째 숫자를 입력하세요 : ")))
nums.append(int(nums[0] / nums[1]))
print("{0} / {1} = {2}".format(num1, num2, int(num1/num2)))
except ValueError:
print("에러! 잘못된 값을 입력하였습니다.")
except ZeroDivisionError as err:
print(err)
| [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
91cb47500489f1a197928970caee8353d4676186 | 347299b495e1417dd4748c86f7696fb849c79c4f | /analisis_proyectos/servicios/configurador.py | 5fd1e564e54d15c1808c25abe4961a5136ee9eb8 | [] | no_license | vvalotto/python_uner | 6a379cc7ab4cb6380c193863403409e5559e1cd6 | 8fbeb9fde0883427695b5065232ace452160034f | refs/heads/master | 2020-07-25T23:32:50.829101 | 2019-10-18T18:02:16 | 2019-10-18T18:02:16 | 208,457,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,099 | py | """
Clase que inicializa y arma las instancias necesarias iniciales de la aplicaccion:
Contextos de datos
Respositorios
Gestores
"""
from analisis_proyectos.infraestructura.persistencia.contexto.contexto_database_sqlite import *
from analisis_proyectos.infraestructura.persistencia.repositorios.DB_repositorio_proyecto import *
from analisis_proyectos.infraestructura.persistencia.mapeador.proyecto import *
from analisis_proyectos.aplicacion.gestores.gestor_proyecto import *
from analisis_proyectos.aplicacion.gestores.gestor_componente import *
from analisis_proyectos.aplicacion.gestores.gestor_elemento import *
from analisis_proyectos.dominio.analitico.muestra import *
from analisis_proyectos.dominio.analitico.analizador import *
import os
directorio_base = os.path.abspath(os.path.dirname(__file__))
URI_DATABASE = 'sqlite:///' + os.path.join(directorio_base, 'proyectos.sqlite')
class Configurador:
    """Composition root of the application.

    Every attribute is class-level and created at import time: the SQLite
    data context, the repositories, the domain managers wired to those
    repositories, and the analytic sample pre-loaded from the measurements
    table.  Statement order matters here.
    """
    contexto = ContextoDBSQLite(URI_DATABASE)
    repositorio_proyecto = DBRepositorioProyecto(contexto, MapeadorDatosProyecto(contexto))
    repositorio_componente = DBRepositorioComponente(contexto, MapeadorDatosComponente(contexto))
    repositorio_elemento = DBRepositorioElemento(contexto, MapeadorDatosElemento(contexto))
    # Managers are wired to their repositories at class-creation time.
    gestor_proyecto = GestorProyecto()
    gestor_proyecto.asignar_repositorio(repositorio_proyecto)
    gestor_componente = GestorComponente()
    gestor_componente.asignar_repositorio(repositorio_componente)
    gestor_elemento = GestorElemento()
    gestor_elemento.asignar_repositorio(repositorio_elemento)
    muestra_proyectos=Muestra()
    analizador_proyecto = Analizador(muestra_proyectos)
    # NOTE(review): bare filename, unlike the absolute URI_DATABASE above —
    # confirm cargar_valores_de_muestra resolves it relative to the CWD.
    repositorio = "proyectos.sqlite"
    datos_origen = "SELECT * FROM mediciones_proyecto;"
    muestra_proyectos.cargar_valores_de_muestra(repositorio, datos_origen)
if __name__ == '__main__':
    # Manual smoke test: show the configured DB resource and, if present,
    # fetch a known project by name.
    print(Configurador.contexto.recurso)
    if Configurador.gestor_proyecto.existe_proyecto("Sistema de Gestión de Flota"):
        proyecto = Configurador.gestor_proyecto.recuperar_proyecto_por_nombre("Sistema de Gestión de Flota")
        print(proyecto)
| [
"vvalotto@gmail.com"
] | vvalotto@gmail.com |
9a4060829fa3fbbf0940f5bfb96aa01245a3d0c5 | 1bccf7d57c7aa8d48b84fff187de4b6ff2599cb6 | /pandora_rqt_gui/scripts/pandora_rqt_gui.py | c324b72f85afebafedfbb68e996684964493b158 | [] | no_license | skohlbr/pandora_ros_pkgs | 733ed34edb5b6d46e59df4acb01288f28ef3b50f | eecaf082b47e52582c5f009eefbf46dd692aba4f | refs/heads/indigo-devel | 2021-01-21T18:06:14.967943 | 2015-11-04T15:08:03 | 2015-11-04T15:08:03 | 53,413,573 | 0 | 1 | null | 2016-03-08T13:19:40 | 2016-03-08T13:19:40 | null | UTF-8 | Python | false | false | 140 | py | #!/usr/bin/env python
import sys
from rqt_gui.main import Main
# Launch the rqt shell with this package's plugin running standalone and
# propagate the GUI main loop's return value as the process exit status.
main = Main()
sys.exit(main.main(sys.argv, standalone='pandora_rqt_gui'))
| [
"pandora@ee.auth.gr"
] | pandora@ee.auth.gr |
4fff3230f52f4f0a934e4f4430ae04de4a2e3c0a | 43b6bffc820d26dfd223728bed71241fb3d54983 | /abc/223/c.py | b26920383e241d8bef73911853f1633fb651f49a | [] | no_license | kiccho1101/atcoder | 3a163b6a38a62c578dad6d15ccb586d0fcd1e004 | c86cb8e08b881a0a01dc2ef538f0699f3951e897 | refs/heads/master | 2023-03-02T13:27:17.747402 | 2022-05-30T13:51:00 | 2022-05-30T13:51:00 | 223,152,693 | 1 | 0 | null | 2023-02-11T01:29:47 | 2019-11-21T10:52:49 | Python | UTF-8 | Python | false | false | 345 | py | N = int(input())
# One (a, b) pair per line; each contributes a units at rate b, so it
# takes a / b time.  (Competitive-programming task; exact story not
# visible here — inferred from the arithmetic.)
ab = [list(map(int, input().split())) for _ in range(N)]
secs = [a / b for a, b in ab]
# Half of the total duration: the answer is the amount consumed by then.
middle = sum(secs) / 2
ans = 0
curr = 0
for i, (a, b) in enumerate(ab):
    diff = middle - (curr + secs[i])
    if diff > 0:
        # The halfway time lies beyond this segment: consume it fully.
        curr += secs[i]
        ans += a
    else:
        # The halfway time falls inside this segment: add the partial amount.
        ans += (middle - curr) * b
        break
print(ans)
| [
"yodai.a.kishimoto@rakuten.com"
] | yodai.a.kishimoto@rakuten.com |
c125b0cb7fc1c7088739d00ba172ced46b39efe7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02909/s936658151.py | b112400ac8521188a69b312edffaf79281fe8a75 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | S = input()
# Cycle the forecast one step: Sunny -> Cloudy -> Rainy -> Sunny.
weather = ['Sunny', 'Cloudy','Rainy']
if S == weather[0]:
    print(weather[1])
elif S == weather[1]:
    print(weather[2])
# Anything else (i.e. 'Rainy') wraps around to 'Sunny'.
else:
print(weather[0]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1e92b2d38e080ce7e20b2e929c60746d9140b4ed | a18db39e7c392d9529f2cc4889e188310706b14f | /amadon_django/apps/amadon_app/migrations/0001_initial.py | 10c9629f6f602333a30d5560925895a7900a50db | [] | no_license | LeoKnox/amadon | d9f9d6234a3ebba12576115a02ee8ae222addb89 | f522f2c2e7ba655f8f11fe1984d49b9db799fbd3 | refs/heads/master | 2020-04-11T05:45:05.352715 | 2018-12-12T23:46:15 | 2018-12-12T23:46:15 | 161,558,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-12-12 20:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Shop model (item, price)."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Shop',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item', models.CharField(max_length=255)),
                ('price', models.FloatField()),
            ],
        ),
    ]
| [
"noreply@github.com"
] | LeoKnox.noreply@github.com |
df112d0a858197f8660c215b773b0b219f73d5c7 | 0c9e35012baf61ee678bc719588b8cb2ccbe449e | /product/migrations/0228_auto_20180502_0957.py | 8a9605683776d8c9b4c9a98061df35772b9ed2bd | [] | no_license | rickyakilimali/approeco | 6f0f62d57b6e5361b5c5dd473038f2999bac1413 | fd96ca6d70dabf20668d2a582c67e5d409a4a097 | refs/heads/master | 2018-09-21T12:44:27.414394 | 2018-06-06T16:35:40 | 2018-06-06T16:35:40 | 113,836,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-05-02 09:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration updating productiontournage.nombre_minute
    (shoot-duration choices in minutes, per the French verbose_name)."""

    dependencies = [
        ('product', '0227_auto_20180502_0954'),
    ]
    operations = [
        migrations.AlterField(
            model_name='productiontournage',
            name='nombre_minute',
            field=models.CharField(choices=[('0-10', '0-10'), ('10-20', '10-20'), ('20-40', '20-40'), ('40-80', '40-80')], max_length=20, verbose_name='DUREE DU TOURNAGE(MINUTE)'),
        ),
    ]
| [
"ricky.akilimali@approeco.net"
] | ricky.akilimali@approeco.net |
6160f5c334c7db26952d00d3e77126d97da0f263 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/SevenTeV/RSGravitonToWW_kMpl01_M_2000_TuneZ2_7TeV_pythia6_cff.py | 44240dee8666db40ad085eb0bf7cb53b891f710d | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,223 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2Settings_cfi import *
# Pythia6 generator configuration; per the annotation below this produces
# RS graviton -> WW events at 7 TeV with the Z2 underlying-event tune.
generator = cms.EDFilter("Pythia6GeneratorFilter",
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaPylistVerbosity = cms.untracked.int32(0),
    filterEfficiency = cms.untracked.double(1),
    comEnergy = cms.double(7000.0),
    crossSection = cms.untracked.double(1.83e-3),
    PythiaParameters = cms.PSet(
        pythiaUESettingsBlock,
        processParameters = cms.vstring(
            'MSEL = 0',
            # Only subprocesses 391/392 are enabled — presumably the
            # graviton production channels; confirm against Pythia6 docs.
            'MSUB(391) = 1',
            'MSUB(392) = 1',
            # Resonance (particle code 347) mass set to 2000, matching the
            # M_2000 sample name.
            'PMAS(347,1) = 2000',
            'PARP(50) = 0.54', #0.54
            # Particle 5000039: all decay channels off, then allow only
            # channels involving code 24.
            '5000039:ALLOFF',
            '5000039:ONIFANY 24',
        ),
        parameterSets = cms.vstring(
            'pythiaUESettings',
            'processParameters')
    )
)
# Bookkeeping metadata embedded in the produced dataset.
configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('\$Revision: 1.1 $'),
    name = cms.untracked.string('\$Source: /cvs/CMSSW/UserCode/hinzmann/production/RSGravitonToWW_kMpl01_M_2000_TuneZ2_7TeV_pythia6_cff.py,v $'),
    annotation = cms.untracked.string('Fall2011 sample with PYTHIA6: RSG -> WW, TuneZ2')
)
# Sequence containing the generator filter.
ProductionFilterSequence = cms.Sequence(generator)
| [
"sha1-45889d0b422ced4b08fb223eae59c585c331ccec@cern.ch"
] | sha1-45889d0b422ced4b08fb223eae59c585c331ccec@cern.ch |
a274df74a04971717b3273284e2577410940beae | cb4d2629eadfafb4ffbcea8087399acb4f35cf00 | /mycalendar/serializers.py | a44370c64247b8d713cd71a8eb00701e76792d99 | [] | no_license | rdahal35/django_fullcalendar | 576023fa348391082ee82b50e27772b9c11c7b47 | 35623f562642816b9f501ea1390a03d96c0d188a | refs/heads/master | 2022-12-13T12:32:33.098739 | 2018-08-13T10:28:35 | 2018-08-13T10:28:35 | 142,756,016 | 0 | 1 | null | 2022-12-08T02:19:24 | 2018-07-29T11:40:13 | JavaScript | UTF-8 | Python | false | false | 182 | py | from rest_framework import serializers
from .models import Event
class eventSerializer(serializers.ModelSerializer):
    """DRF serializer for the Event model (exposes every field; see Meta)."""
    class Meta:
        model = Event
fields = '__all__' | [
"rdahal35@gmail.com"
] | rdahal35@gmail.com |
a79bfdb21a8f6a40a5c9ace790f401e9e1725607 | 942ee5e8d54e8ebe9c5c841fbfdd1da652946944 | /1501-2000/1678.Goal Parser Interpretation.py | 4653442e30b404a724cb952adacb2f49bdd3b8e1 | [] | no_license | kaiwensun/leetcode | 0129c174457f32887fbca078fb448adce46dd89d | 6b607f4aae3a4603e61f2e2b7480fdfba1d9b947 | refs/heads/master | 2023-08-31T07:30:50.459062 | 2023-08-27T07:59:16 | 2023-08-27T07:59:16 | 57,526,914 | 69 | 9 | null | 2023-08-20T06:34:41 | 2016-05-01T05:37:29 | Python | UTF-8 | Python | false | false | 552 | py | class Solution(object):
def interpret(self, command):
"""
:type command: str
:rtype: str
"""
def tokenize(command):
i = 0
while i < len(command):
if command[i] == "G":
yield "G"
i += 1
elif command[i + 1] == ")":
yield "o"
i += 2
else:
yield "al"
i += 4
return "".join(token for token in tokenize(command))
| [
"skw_kevin@126.com"
] | skw_kevin@126.com |
1a7cb4284d1fd36b31dba154ea268d380e9ed4f4 | 0d1c1a216b01f6773e751691e9d3e10cc4f27d09 | /tensorflow/contrib/cudnn_rnn/python/kernel_tests/cudnn_rnn_ops_benchmark.py | 8d5ff341acd737162334c2f2a8f4c81db9db82d2 | [
"Apache-2.0"
] | permissive | abdo5520/tensorflow | 13c1496e7aa115bba06cda5fc9dc73ba9e4b1694 | 55b01593515817992821423fec19733bca91c918 | refs/heads/master | 2021-01-13T04:05:38.763884 | 2017-01-01T13:10:05 | 2017-01-01T13:10:05 | 77,894,045 | 0 | 1 | null | 2017-01-03T07:28:02 | 2017-01-03T07:28:02 | null | UTF-8 | Python | false | false | 6,958 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for Cudnn RNN models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
flags.DEFINE_integer("batch_size", 64, "batch size.")
FLAGS = flags.FLAGS
class CudnnRNNBenchmark(test.Benchmark):
  """Benchmarks Cudnn LSTM and other related models."""

  def _GetTestConfig(self):
    """Returns the benchmark configurations, keyed by problem-size name."""
    return {
        "large": {
            "num_layers": 4,
            "num_units": 1024,
            "seq_length": 40,
            "batch_size": 64,
        },
        "medium": {
            "num_layers": 4,
            "num_units": 512,
            "seq_length": 30,
            "batch_size": 64,
        },
        "small": {
            "num_layers": 4,
            "num_units": 128,
            "seq_length": 20,
            "batch_size": 64,
        },
    }

  def _GetConfigDesc(self, config):
    """Returns a short config identifier used in reported benchmark names."""
    num_layers = config["num_layers"]
    num_units = config["num_units"]
    batch_size = config["batch_size"]
    seq_length = config["seq_length"]
    return "y%d_u%d_b%d_q%d" % (num_layers, num_units, batch_size, seq_length)

  def _BenchmarkOp(self, op, desc):
    """Runs `op` repeatedly, timing the post-warm-up steps, and reports."""
    burn_in_steps = 10
    benchmark_steps = 40
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      # Bug fix: the original used bare `xrange`, which is undefined under
      # Python 3 (no `six.moves` import in this file); `range` is
      # semantically identical for this loop.
      for i in range(burn_in_steps + benchmark_steps):
        if i == burn_in_steps:
          start_time = time.time()
        sess.run(op)
      total_time = time.time() - start_time
      step_time = total_time / benchmark_steps
      print("%s takes %.4f sec/step" % (desc, step_time))
      self.report_benchmark(
          name=desc, iters=benchmark_steps, wall_time=total_time)

  def benchmarkCudnnLSTMTraining(self):
    """Benchmarks one fused-Cudnn-LSTM training step per configuration."""
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      # (A redundant re-lookup of `config` in the original was removed.)
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]
      with ops.Graph().as_default(), ops.device("/gpu:0"):
        model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units)
        params_size_t = model.params_size()
        input_data = variables.Variable(
            array_ops.ones([seq_length, batch_size, num_units]))
        input_h = variables.Variable(
            array_ops.ones([num_layers, batch_size, num_units]))
        input_c = variables.Variable(
            array_ops.ones([num_layers, batch_size, num_units]))
        params = variables.Variable(
            array_ops.ones([params_size_t]), validate_shape=False)
        output, output_h, output_c = model(
            is_training=True,
            input_data=input_data,
            input_h=input_h,
            input_c=input_c,
            params=params)
        # Group all gradients into a single op so one run covers the
        # whole backward pass.
        all_grads = gradients_impl.gradients(
            [output, output_h, output_c],
            [params, input_data, input_h, input_c])
        training_op = control_flow_ops.group(*all_grads)
        self._BenchmarkOp(training_op, "cudnn_lstm %s %s" %
                          (config_name, self._GetConfigDesc(config)))

  def benchmarkTfRNNLSTMTraining(self):
    """Benchmarks one training step of the plain TF LSTMCell RNN."""
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]
      with ops.Graph().as_default(), ops.device("/gpu:0"):
        inputs = seq_length * [
            array_ops.zeros([batch_size, num_units], dtypes.float32)
        ]
        initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
        cell = core_rnn_cell_impl.LSTMCell(
            num_units=num_units, initializer=initializer, state_is_tuple=True)
        multi_cell = core_rnn_cell_impl.MultiRNNCell([cell] * num_layers)
        outputs, final_state = core_rnn.static_rnn(
            multi_cell, inputs, dtype=dtypes.float32)
        trainable_variables = ops.get_collection(
            ops.GraphKeys.TRAINABLE_VARIABLES)
        gradients = gradients_impl.gradients([outputs, final_state],
                                             trainable_variables)
        training_op = control_flow_ops.group(*gradients)
        self._BenchmarkOp(training_op, "tf_rnn_lstm %s %s" %
                          (config_name, self._GetConfigDesc(config)))

  def benchmarkTfRNNLSTMBlockCellTraining(self):
    """Benchmarks one training step of the LSTMBlockCell-based RNN."""
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]
      with ops.Graph().as_default(), ops.device("/gpu:0"):
        inputs = seq_length * [
            array_ops.zeros([batch_size, num_units], dtypes.float32)
        ]
        cell = lstm_ops.LSTMBlockCell(num_units=num_units)
        multi_cell = core_rnn_cell_impl.MultiRNNCell([cell] * num_layers)
        outputs, final_state = core_rnn.static_rnn(
            multi_cell, inputs, dtype=dtypes.float32)
        trainable_variables = ops.get_collection(
            ops.GraphKeys.TRAINABLE_VARIABLES)
        gradients = gradients_impl.gradients([outputs, final_state],
                                             trainable_variables)
        training_op = control_flow_ops.group(*gradients)
        self._BenchmarkOp(training_op, "tf_rnn_lstm_block_cell %s %s" %
                          (config_name, self._GetConfigDesc(config)))
if __name__ == "__main__":
test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
102be23096fba5b7196ac3e0c929f4ac9631cd95 | eb82022c0cfc7c8747661cff9624ad2099fa1c3f | /dev_bc/models/product_uom.py | 77461ba74a7add5a24d3a1415a7831e4131454f2 | [] | no_license | dadysuarsa/Odoo | 8d026a066c390cc8f72805d2672212e61260c1cb | c9becd0c192fa239520ad3e1a11d81f70832eddf | refs/heads/master | 2023-03-11T06:02:06.011575 | 2021-02-26T02:17:37 | 2021-02-26T02:17:37 | 276,346,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | from odoo import models, fields, api, _
import openerp.addons.decimal_precision as dp
from odoo.exceptions import UserError
class productuom(models.Model):
    """Inherit Odoo's product.uom to add an extra unit label (name_bc below,
    shown as 'Unit of bc' — presumably for customs/BC documents; confirm)."""
    _inherit = 'product.uom'
name_bc = fields.Char('Unit of bc') | [
"dads02_zetti@yahoo.com"
] | dads02_zetti@yahoo.com |
294939bed03799b74ad9da32b6d03e81286ce9ed | 8a83bb7acb9b62183fca817e1f196dd8075630a4 | /24_fourthFolder/18_lazy_propagation.py | 4435a51def47f2397589dc377acdb639e7fffb40 | [] | no_license | sandeepkumar8713/pythonapps | ff5ad3da854aa58e60f2c14d27359f8b838cac57 | 5dcb5ad4873124fed2ec3a717bfa379a4bbd197d | refs/heads/main | 2023-09-01T04:12:03.865755 | 2023-08-31T07:04:58 | 2023-08-31T07:04:58 | 234,762,925 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,404 | py | # https://www.geeksforgeeks.org/lazy-propagation-in-segment-tree/
# Question : Write a function which takes a start index and an end index and returns the
# sum over that range. The classic "Sum of a Given Range" problem is used here to explain
# lazy propagation: the interviewer then asked for range updates in the most optimised
# way, and the answer is a segment tree with lazy propagation, implemented below.
#
# Question Type : Generic
# Used : updateRange(us, ue)
# 1) If current segment tree node has any pending update, then first add that
# pending update to current node.
# 2) If current node's range lies completely in update query range.
# a) Update current node
# b) Postpone updates to children by setting lazy value for children nodes.
# 3) If current node's range overlaps with update range, follow the same approach as above simple update.
# a) Recur for left and right children.
# b) Update current node using results of left and right calls.
# Logic :
# def getSumUtil(self, segStart, segEnd, queryStart, queryEnd, index):
# if self.lazy[index] != 0:
# self.tree[index] += (segEnd - segStart + 1) * self.lazy[index]
# if segStart != segEnd:
# self.lazy[index * 2 + 1] += self.lazy[index]
# self.lazy[index * 2 + 2] += self.lazy[index]
# self.lazy[index] = 0
# if segStart > segEnd or segStart > queryEnd or segEnd < queryStart: return 0
# if segStart >= queryStart and segEnd <= queryEnd: return self.tree[index]
# mid = (segStart + segEnd) / 2
# return self.getSumUtil(segStart, mid, queryStart, queryEnd, index * 2 + 1) + \
# self.getSumUtil(mid + 1, segEnd, queryStart, queryEnd, index * 2 + 2)
#
# def updateRangeUtil(self, segStart, segEnd, queryStart, queryEnd, index, diff):
# if self.lazy[index] != 0:
# self.tree[index] += (segEnd - segStart + 1) * self.lazy[index]
# if segStart != segEnd:
# self.lazy[index * 2 + 1] += self.lazy[index]
# self.lazy[index * 2 + 2] += self.lazy[index]
# self.lazy[index] = 0
# No overlap b/w segment and query range
# if segStart > segEnd or segStart > queryEnd or segEnd < queryStart:
# return 0
# Query start/end is more than segment range
# if segStart >= queryStart and segEnd <= queryEnd:
# self.tree[index] += (segEnd - segStart + 1) * diff
# if segStart != segEnd:
# self.lazy[index * 2 + 1] += diff
# self.lazy[index * 2 + 2] += diff
# return
# mid = (segStart + segEnd) // 2
# self.updateRangeUtil(segStart, mid, queryStart, queryEnd, index * 2 + 1, diff)
# self.updateRangeUtil(mid + 1, segEnd, queryStart, queryEnd, index * 2 + 2, diff)
# self.tree[index] = self.tree[index * 2 + 1] + self.tree[index * 2 + 2]
# Complexity : Tree construction O(n) Update O(log n) Sum O(log n)
MAX = 1000
class LazySegmentTree:
    def __init__(self):
        # tree: array-backed segment tree (children of node i are 2i+1, 2i+2).
        self.tree = [0] * MAX
        # lazy[i]: pending per-element addition not yet pushed below node i.
        self.lazy = [0] * MAX
        # Number of leaves; set by construct().
        self.size = -1
def constructUtil(self, inpArr, segStart, segEnd, index):
if segStart > segEnd:
return
if segStart == segEnd:
self.tree[index] = inpArr[segStart]
return
mid = (segStart + segEnd) // 2
self.constructUtil(inpArr, segStart, mid, index * 2 + 1)
self.constructUtil(inpArr, mid + 1, segEnd, index * 2 + 2)
self.tree[segStart] = self.tree[segStart * 2 + 1] + self.tree[segStart * 2 + 2]
def construct(self, inpArr):
self.size = len(inpArr)
self.constructUtil(arr, 0, self.size - 1, 0)
def getSumUtil(self, segStart, segEnd, queryStart, queryEnd, index):
if self.lazy[index] != 0:
self.tree[index] += (segEnd - segStart + 1) * self.lazy[index]
if segStart != segEnd:
self.lazy[index * 2 + 1] += self.lazy[index]
self.lazy[index * 2 + 2] += self.lazy[index]
self.lazy[index] = 0
if segStart > segEnd or segStart > queryEnd or segEnd < queryStart:
return 0
# If this segment lies in range
if segStart >= queryStart and segEnd <= queryEnd:
return self.tree[index]
mid = (segStart + segEnd) // 2
return self.getSumUtil(segStart, mid, queryStart, queryEnd, index * 2 + 1) + \
self.getSumUtil(mid + 1, segEnd, queryStart, queryEnd, index * 2 + 2)
def getSum(self, queryStart, queryEnd):
if queryStart < 0 or queryEnd > self.size - 1 or queryStart > queryEnd:
print("Invalid Input")
return -1
return self.getSumUtil(0, self.size - 1, queryStart, queryEnd, 0)
def updateRangeUtil(self, segStart, segEnd, queryStart, queryEnd, index, diff):
if self.lazy[index] != 0:
self.tree[index] += (segEnd - segStart + 1) * self.lazy[index]
if segStart != segEnd:
self.lazy[index * 2 + 1] += self.lazy[index]
self.lazy[index * 2 + 2] += self.lazy[index]
self.lazy[index] = 0
if segStart > segEnd or segStart > queryEnd or segEnd < queryStart:
return 0
# If this segment lies in range
if segStart >= queryStart and segEnd <= queryEnd:
self.tree[index] += (segEnd - segStart + 1) * diff
if segStart != segEnd:
self.lazy[index * 2 + 1] += diff
self.lazy[index * 2 + 2] += diff
return
mid = (segStart + segEnd) // 2
self.updateRangeUtil(segStart, mid, queryStart, queryEnd, index * 2 + 1, diff)
self.updateRangeUtil(mid + 1, segEnd, queryStart, queryEnd, index * 2 + 2, diff)
self.tree[index] = self.tree[index * 2 + 1] + self.tree[index * 2 + 2]
def updateRange(self, queryStart, queryEnd, diff):
self.updateRangeUtil(0, self.size - 1, queryStart, queryEnd, 0, diff)
if __name__ == "__main__":
    # Small demo: build the tree, query a range sum, apply a range
    # increment, then query the same range again.
    arr = [1, 3, 5, 7, 9, 11]
    lazySegmentTree = LazySegmentTree()
    lazySegmentTree.construct(arr)
    print(lazySegmentTree.getSum(1, 3))  # sum of arr[1..3]
    lazySegmentTree.updateRange(1, 5, 10)  # add 10 to every element in [1, 5]
    print(lazySegmentTree.getSum(1, 3))  # sum of the same range after the update
| [
"sandeepkumar8713@gmail.com"
] | sandeepkumar8713@gmail.com |
570f37233f49a95dccabcfab99c73b34aed7b8a1 | fe488ec29223d32d0d94295e838517b7e8cf9c7d | /ghidra/scripts/find_duplicate_functions.py | eefaf1e3448e6d38d520887ec12f49284de1acca | [
"MIT"
] | permissive | qeedquan/debug | 4ad1fd9c2f484190a0a64725653e47172e7595c6 | aadeb3351f832bbd7210f0512037c93e48153062 | refs/heads/master | 2023-05-10T17:06:50.463693 | 2023-05-01T01:33:39 | 2023-05-01T01:33:39 | 87,041,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | #@author
#@category _NEW_
#@keybinding
#@menupath
#@toolbar
class Func:
    """Lightweight record pairing a Ghidra function with its code units and a
    mnemonic-concatenation hash used for duplicate detection."""

    def __init__(self):
        # Start from empty/sentinel values; getfuncs() fills these in.
        self.func, self.body, self.hash = None, [], ""
def getfuncs():
    """Walk every function in the current program and wrap each in a Func
    whose ``hash`` is the concatenation of its instruction mnemonics."""
    listing = currentProgram.getListing()
    wrapped = []
    current = getFirstFunction()
    while current is not None:
        entry = Func()
        entry.func = current
        entry.body = list(listing.getCodeUnits(current.getBody(), True))
        # The first whitespace-separated token of each code unit is its mnemonic.
        entry.hash = "".join(unit.toString().split(" ")[0] for unit in entry.body)
        wrapped.append(entry)
        current = getFunctionAfter(current)
    return wrapped
def getdiffs(funcs):
    """Group functions by mnemonic hash and report the groups with duplicates.

    Args:
        funcs: iterable of objects exposing ``hash`` and ``func`` attributes
            (as produced by ``getfuncs()``).

    Returns:
        A list of lists, one inner list per hash shared by two or more
        functions.  Returning the groups (instead of only printing them)
        lets callers post-process the result; existing callers that ignored
        the previous ``None`` return are unaffected.
    """
    dups = {}
    for f in funcs:
        # setdefault replaces the explicit "key not in dups" dance.
        dups.setdefault(f.hash, []).append(f.func)
    duplicates = [group for group in dups.values() if len(group) > 1]
    for group in duplicates:
        print(group)
    return duplicates
# Entry point: hash every function in the current program and print duplicate groups.
getdiffs(getfuncs())
| [
"qeed.quan@gmail.com"
] | qeed.quan@gmail.com |
2c8eb5561dfa8bcd89c70ae82192ec5011775a7f | 1f190e0290513ede543c370b0428dff8079e32ed | /clusters/haswell/submit_dalton.py | 7010c4b656d761c5a57f7b5154d2253d5287f643 | [] | no_license | Computational-Chemistry-Research/personal_scripts | 535062bc402088d1fd0ccbabae906eb6e7a65e84 | 0b51032582a2ee55b06a150009bb898e2b976606 | refs/heads/master | 2023-02-03T10:02:18.612244 | 2020-12-25T15:20:37 | 2020-12-25T15:20:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,882 | py | #!/usr/bin/env python
"""submit_dalton.py: A standalone script for submitting DALTON jobs to
Haswell's SLURM scheduler.
"""
def template_slurmfile_dalton(inpfile, ppn, time, extrafiles):
    """Render the SLURM job script that runs DALTON on *inpfile*.

    ``extrafiles`` may be None (no extra copies), a single filename, or a
    list of filenames; each one is copied into $LOCAL before the run.
    """
    copy_line = 'cp "$SLURM_SUBMIT_DIR"/{} "$LOCAL"\n'
    # Normalize the three accepted shapes of ``extrafiles`` to a list.
    if extrafiles is None:
        names = []
    elif isinstance(extrafiles, list):
        names = extrafiles
    else:
        names = [extrafiles]
    extra_copies = "".join(copy_line.format(name) for name in names)
    module = 'dalton/2016.2-i2017.1-mkl_parallel-omp'
    return '''#!/bin/bash
#SBATCH --job-name={inpfile}
#SBATCH --output={inpfile}.slurmout
#SBATCH --nodes=1
#SBATCH --ntasks-per-node={ppn}
#SBATCH --time=0-{time}:00:00
module purge
module load intel/2017.1.132
module load mkl/2017.1.132
module load {module}
mkdir -p "$LOCAL"
cp "$SLURM_SUBMIT_DIR"/{inpfile}.dal "$LOCAL"
{extrafiles}cd "$LOCAL"
run_on_exit() {{
    set -v
    find "$LOCAL" -type f -exec chmod 644 '{{}}' \;
    cp -v -R "$LOCAL"/DALTON_scratch_{username}/* "$SLURM_SUBMIT_DIR"
}}
trap run_on_exit EXIT
$(which dalton) -omp {ppn} -noarch -nobackup -d -ow -w "$SLURM_SUBMIT_DIR" {inpfile}.dal
chmod 644 "$SLURM_SUBMIT_DIR"/{inpfile}.out
'''.format(inpfile=inpfile,
           ppn=ppn,
           time=time,
           module=module,
           username=os.environ['USER'],
           extrafiles=extra_copies)
if __name__ == "__main__":
    import argparse
    import os.path
    parser = argparse.ArgumentParser()
    parser.add_argument('inpfilename',
                        help='the DALTON input file to submit',
                        nargs='*')
    parser.add_argument('--ppn',
                        type=int,
                        default=12,
                        help='number of cores to run on (max 12)')
    parser.add_argument('--time',
                        type=int,
                        default=24,
                        help='walltime to reserve (max 144 hours)')
    parser.add_argument('--extrafiles',
                        help='An arbitrary number of files to copy to $LOCAL.',
                        nargs='*')
    args = parser.parse_args()
    # Generate one .slurm jobfile next to each input file and echo its name.
    for inpfilename in args.inpfilename:
        inpfilename = os.path.splitext(inpfilename)[0]  # strip the .dal extension
        slurmfilename = inpfilename + '.slurm'
        with open(slurmfilename, 'w') as slurmfile:
            slurmfile.write(template_slurmfile_dalton(inpfilename,
                                                      args.ppn,
                                                      args.time,
                                                      args.extrafiles))
        print(slurmfilename)
| [
"eric.berquist@gmail.com"
] | eric.berquist@gmail.com |
a963122d803d9c95d2f4da26529d3c3263e17c97 | 8935286746ba7d98e69f28343498a20303b8fbef | /tests/problem_difference_operators/test_Dcd.py | 11d01a7860d1dd2af0fcf3104146558a36b2498c | [] | no_license | ASU-CompMethodsPhysics-PHY494/Activity_11_differential_operators | dceac220f1a68addc8a4b1720793a62bdf805038 | bec1e9064d24364fa6e7013b4719c3d48e9e3529 | refs/heads/main | 2023-03-12T15:45:59.381399 | 2021-03-04T10:09:50 | 2021-03-04T10:09:50 | 344,430,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from ..base import _TestDxx
class TestDcd(_TestDxx):
    """Exercise the centered-difference first-derivative operator D_cd:
    D_cd f(x) = [f(x + dx/2) - f(x - dx/2)] / dx."""
    name = "D_cd"

    @staticmethod
    def op(f, x, dx):
        # Symmetric stencil: sample f half a step on either side of x.
        half_step = dx / 2
        forward = f(x + half_step)
        backward = f(x - half_step)
        return (forward - backward) / dx
| [
"orbeckst@gmail.com"
] | orbeckst@gmail.com |
ca62d5843d222ded6598b0f8b2af7ca737d437d6 | 12579725d1c1e51a436136f465a8a7e60a76248b | /apps/almacen/views.py | a054d12a7d23f2f3f1b6d09e0dc0930306a1e707 | [] | no_license | kiritodeveloper/optica | f4f43b9ead720e0708974736ba6a798e7980ccb8 | 6491c4b6b074a1e1f6d7a5b2d73f85e7ed11bedd | refs/heads/master | 2023-01-06T14:00:20.337601 | 2019-07-07T00:42:21 | 2019-07-07T00:42:21 | 176,391,884 | 3 | 0 | null | 2022-12-26T20:15:18 | 2019-03-19T00:33:16 | TSQL | UTF-8 | Python | false | false | 6,978 | py | # -*- encoding: utf-8 -*-
from django.shortcuts import render,redirect, HttpResponse
from django.views.generic import View, ListView, DeleteView
from apps.cliente.models import Cliente
from apps.facturacion.models import Venta
from apps.receta.models import Receta
from .models import Producto,Proveedor
from .forms import ProductoForm,IngresoProductosForm
from django.contrib import messages
from apps.usuarios.views import LoginRequiredMixin
from decimal import Decimal
import sys
# Python 2 only: re-expose sys.setdefaultencoding (hidden by site.py) and
# force UTF-8 as the process-wide default string encoding.  Both ``reload``
# as a builtin and ``setdefaultencoding`` are gone in Python 3.
reload(sys)
sys.setdefaultencoding("utf-8")
class Index(LoginRequiredMixin,View):
    """Dashboard landing page.

    Renders ``index.html`` with the clients whose birthday is today.

    NOTE: a large block of commented-out code that computed per-month sales
    totals for the previous six months (for dashboard charts) was removed
    from ``get``; recover it from version control if those metrics return.
    Removing it does not change behavior: none of it executed, so the
    ``locals()`` passed to ``render`` are identical.
    """
    template_name = 'index.html'

    def get(self,request):
        import datetime
        # Clients celebrating their birthday today (month/day match).
        clientes = Cliente.objects.filter(fecha_nacimiento__month=datetime.date.today().month, fecha_nacimiento__day=datetime.date.today().day)
        return render(request,self.template_name,locals())
class Productos(LoginRequiredMixin,View):
    # Product administration: GET lists products with blank forms; POST
    # handles either the new-product form or the stock-intake form.
    # NOTE(review): uses the Python 2 ``unicode`` builtin, so this module
    # targets Python 2 only.
    template_name = 'productos/index.html'
    def get(self,request):
        # Render the full product list together with fresh (unbound) forms.
        productos = Producto.objects.all()
        producto_form = ProductoForm()
        ingreso_form = IngresoProductosForm()
        return render(request,self.template_name,locals())
    def post(self,request):
        # Bind both candidate forms to the POST data; whichever validates wins.
        productos = Producto.objects.all()
        producto_form = ProductoForm(request.POST)
        ingreso_form = IngresoProductosForm(request.POST)
        if producto_form.is_valid():
            # New product registered: flash a success message and re-render
            # the list with fresh forms.
            producto = producto_form.save()
            messages.success(request, unicode('El producto '+unicode(producto.descripcion)+' de código '+unicode(producto.codigo)+' fue registrado con exito'))
            productos = Producto.objects.all()
            producto_form = ProductoForm()
            ingreso_form = IngresoProductosForm()
            return render(request,self.template_name,locals())
        elif(ingreso_form.is_valid()):
            # Stock intake: record the movement and bump the product's stock.
            historial = ingreso_form.save()
            producto = Producto.objects.get(pk=historial.producto.id)
            producto.stock_actual += int(request.POST['cantidad'])
            producto.save()
            messages.success(request, 'Se ingresaron '+request.POST['cantidad']+' unidades de '+producto.descripcion)
            productos = Producto.objects.all()
            producto_form = ProductoForm()
            ingreso_form = IngresoProductosForm()
            return render(request,self.template_name,locals())
        else:
            # Neither form validated: flash an error and re-render with the
            # bound forms so field errors remain visible.
            messages.error(request, 'No se pudo registrar la operación, porfavor intente denuevo.')
            return render(request,self.template_name,locals())
import json
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def ObtenerProducto(request, nro):
    """Return the suggested price and current stock of product *nro* as JSON."""
    producto = Producto.objects.get(pk=nro)
    payload = {
        "precio": float(producto.precio_sugerido),
        "max_value": producto.stock_actual,
    }
    return HttpResponse(json.dumps(payload), content_type='application/json')
| [
"admin@example.com"
] | admin@example.com |
6cb58c3d103adce06bc3212805179da117d0586a | 87d0de6a06451d7aa561b72d908d06e68074f650 | /core/arxiv/submission/auth.py | 2bc77e6859adcdde806a42175c0522339435c9cd | [
"MIT"
] | permissive | arXiv/arxiv-submission-core | 3e79085ee408fd83b4dd4c0c1e8ccc53dd282230 | 6077ce4e0685d67ce7010800083a898857158112 | refs/heads/develop | 2022-01-21T02:11:56.384920 | 2020-07-31T18:16:17 | 2020-07-31T18:16:17 | 106,854,828 | 14 | 8 | MIT | 2022-01-06T22:29:31 | 2017-10-13T17:36:51 | Python | UTF-8 | Python | false | false | 1,330 | py |
from typing import List
import uuid
from datetime import datetime, timedelta
from pytz import UTC
from arxiv.users import auth, domain
from arxiv.base.globals import get_application_config
from .domain.agent import User, Agent, Client
def get_system_token(name: str, agent: Agent, scopes: List[str]) -> str:
    """Mint a signed auth JWT for a system client named *name* acting as *agent*.

    If *agent* is a :class:`User`, the session carries a verified user record
    built from the agent's fields; otherwise the session is client-only.
    Returns the encoded token string, signed with the app's ``JWT_SECRET``.
    """
    start = datetime.now(tz=UTC)
    end = start + timedelta(seconds=36000)  # token lifetime: 10 hours
    if isinstance(agent, User):
        user = domain.User(
            username=agent.username,
            email=agent.email,
            user_id=agent.identifier,
            name=agent.name,
            verified=True
        )
    else:
        user = None
    session = domain.Session(
        session_id=str(uuid.uuid4()),
        # NOTE(review): start_time uses a *naive* datetime.now() while the
        # timezone-aware ``start`` above is otherwise unused — confirm whether
        # start_time should be ``start``.
        start_time=datetime.now(), end_time=end,
        user=user,
        client=domain.Client(
            owner_id='system',
            client_id=name,
            name=name
        ),
        authorizations=domain.Authorizations(scopes=scopes)
    )
    secret = get_application_config()['JWT_SECRET']
    return str(auth.tokens.encode(session, secret))
def get_compiler_scopes(resource: str) -> List[str]:
    """Return the minimal auth scopes needed for compiler integration.

    Covers reading existing compilations and starting new ones, both bound
    to *resource*.
    """
    needed = (auth.scopes.READ_COMPILE, auth.scopes.CREATE_COMPILE)
    return [scope.for_resource(resource) for scope in needed]
| [
"brp53@cornell.edu"
] | brp53@cornell.edu |
f9f5f1c810bd6768490f56eed7d994bb84fc244d | d5682d2ef13ad63c68d59d3d0706853a88035ff1 | /week4/netmiko_test.py | 7d33f05d13548fa2bd2280fdb6c9095940601267 | [
"Apache-2.0"
] | permissive | mikealford/ktbyers_automation | 66467f5352a3fbb111fc18f9c90b83cf97a75e79 | d8b30e7ddbe27b4bc62b74bfc051b6d1c099f7f9 | refs/heads/master | 2020-04-17T19:37:42.365653 | 2019-02-19T01:16:41 | 2019-02-19T01:16:41 | 166,872,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | from netmiko import ConnectHandler
from getpass import getpass
password = getpass()  # prompt once; the same credential is reused for all three switches
# Connection parameters for three Cisco IOS switches (only the IP differs).
switch1 = {
    'device_type': 'cisco_ios',
    'ip': '192.168.122.172',
    'username': 'malford',
    'password': password,
}
switch2 = {
    'device_type': 'cisco_ios',
    'ip': '192.168.122.173',
    'username': 'malford',
    'password': password,
}
switch3 = {
    'device_type': 'cisco_ios',
    'ip': '192.168.122.174',
    'username': 'malford',
    'password': password,
}
# Open an SSH session to each switch.
ssh_switch1 = ConnectHandler(**switch1)
ssh_switch2 = ConnectHandler(**switch2)
ssh_switch3 = ConnectHandler(**switch3)
#output = ssh_switch1.send_command("show ip int brief")
# Push a single configuration command; note that only switch1 receives it.
config_commands = ['logging buffered 20000']
output = ssh_switch1.send_config_set(config_commands)
print(output)
| [
"mike.alford13@gmail.com"
] | mike.alford13@gmail.com |
debf5d538dd470447e69bf1ceafc4368d95d2702 | a276d03f34457c174d2e79fc4fdb17c90299e843 | /projects/buttons/lib/markdown/__version__.py | 65edeeddbe2024b9e9a9513b5ec0deaec794cfdb | [
"MIT"
] | permissive | lucidworks/streams | effba3bc55df10431fb505937180b30d72e248b8 | 89aaf02382494cf09041ca5dadb41dddb86cf9d8 | refs/heads/master | 2021-06-02T04:09:27.626504 | 2020-01-07T01:28:09 | 2020-01-07T01:28:09 | 106,742,467 | 8 | 6 | null | 2020-03-17T21:17:48 | 2017-10-12T20:34:28 | Python | UTF-8 | Python | false | false | 907 | py | #
# markdown/__version__.py
#
# version_info should conform to PEP 386
# (major, minor, micro, alpha/beta/rc/final, #)
# (1, 1, 2, 'alpha', 0) => "1.1.2.dev"
# (1, 2, 0, 'beta', 2) => "1.2b2"
version_info = (2, 4, 0, 'final', 0)
def _get_version():
" Returns a PEP 386-compliant version number from version_info. "
assert len(version_info) == 5
assert version_info[3] in ('alpha', 'beta', 'rc', 'final')
parts = 2 if version_info[2] == 0 else 3
main = '.'.join(map(str, version_info[:parts]))
sub = ''
if version_info[3] == 'alpha' and version_info[4] == 0:
# TODO: maybe append some sort of git info here??
sub = '.dev'
elif version_info[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version_info[3]] + str(version_info[4])
return str(main + sub)
version = _get_version()
| [
"kordless@gmail.com"
] | kordless@gmail.com |
a68ab708a3dd64fa6df53028e3ae4f92b71cde57 | 5d0edf31b17c5375faf6126c1a7be8e79bfe2ab8 | /buildout-cache/eggs/collective.siterss-0.4-py2.7.egg/collective/siterss/tests.py | 504d02d5c4f70d7c823545bd4735b78c9bb7ae7c | [] | no_license | renansfs/Plone_SP | 27cba32ebd9fc03dae3941ec23cf1bf0a7b6667a | 8a7bdbdb98c3f9fc1073c6061cd2d3a0ec80caf5 | refs/heads/master | 2021-01-15T15:32:43.138965 | 2016-08-24T15:30:19 | 2016-08-24T15:30:19 | 65,313,812 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | import unittest
#from zope.testing import doctestunit
#from zope.component import testing
#from Testing import ZopeTestCase as ztc
from Products.Five import zcml
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
# Create the default Plone site fixture once, at module import time.
ptc.setupPloneSite()
import collective.siterss
class TestCase(ptc.PloneTestCase):
    # Base test case for this package; its layer loads the package ZCML once
    # per layer set-up instead of once per test.
    class layer(PloneSite):
        @classmethod
        def setUp(cls):
            # Load collective.siterss' configure.zcml with Five debug mode
            # temporarily enabled so configuration errors surface loudly.
            fiveconfigure.debug_mode = True
            zcml.load_config('configure.zcml',
                             collective.siterss)
            fiveconfigure.debug_mode = False
        @classmethod
        def tearDown(cls):
            # Nothing to undo: the ZCML registrations persist for the layer.
            pass
def test_suite():
    """Build the package's test suite.

    Every candidate entry is currently disabled, so the returned suite is
    empty; re-enable entries below as the corresponding doctests are
    restored.
    """
    # Unit doctests that could be registered here:
    #   doctestunit.DocFileSuite('README.txt', package='collective.siterss',
    #                            setUp=testing.setUp, tearDown=testing.tearDown)
    #   doctestunit.DocTestSuite(module='collective.siterss.mymodule',
    #                            setUp=testing.setUp, tearDown=testing.tearDown)
    # Integration tests that use PloneTestCase:
    #   ztc.ZopeDocFileSuite('README.txt', package='collective.siterss',
    #                        test_class=TestCase)
    #   ztc.FunctionalDocFileSuite('browser.txt', package='collective.siterss',
    #                              test_class=TestCase)
    return unittest.TestSuite([])


if __name__ == '__main__':
    # Allow running the suite directly with the unittest runner.
    unittest.main(defaultTest='test_suite')
| [
"renansfs@gmail.com"
] | renansfs@gmail.com |
447a68a7c95e7246409e5e6a6d769ae3909d7314 | b03a7b92cef9cbee31918b0608ce58669b92df73 | /jd_home1.py | 014257d838a833709c28bf6bb2c6adc3e7d5d364 | [] | no_license | luobodage/- | 5cbc3f7900867cddb53bf347da57716cd7917481 | affbaa854a031819a74c944d3a95f4dc5d90d08f | refs/heads/master | 2022-12-30T23:49:31.205151 | 2020-10-25T09:02:01 | 2020-10-25T09:02:01 | 297,016,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,052 | py | import requests
import UserAgent
import lxml.etree as le
import re
import os
def spider_home():
    """Prompt for a product name, scrape its first JD search hit, and return
    the item code (first run of digits found in the result URL).

    Side effects: prints the price, title and URL, and sets the module
    globals ``headers``, ``shop_title`` and ``href_shop`` that other helpers
    (e.g. ``file_rename``) depend on.
    """
    shop = input("请输入你要搜索的商品:")
    global headers, shop_title, href_shop
    headers = UserAgent.get_headers()  # pick a random User-Agent header set
    url = 'https://search.jd.com/Search?keyword={shop}&enc=utf-8&wq=%E5%B0%8F&pvid=469d5d51a3184cc9a053124dc020b31f'.format(
        shop=shop
    )
    try:
        r = requests.get(
            url=url,
            headers=headers
        ).content
        content = le.HTML(r)
        href = content.xpath('//*[@id="J_goodsList"]/ul/li[1]/div/div[1]/a/@href')
        price = content.xpath('//*[@id="J_goodsList"]/ul/li[1]/div/div/strong/i/text()')
        title_01 = content.xpath('//*[@id="J_goodsList"]/ul/li[1]/div/div/a/em/text()')
        title = [x.strip() for x in title_01 if x.strip() != '']  # drop empty fragments and surrounding whitespace
        re_01 = re.compile(r'\d+')
        number = re_01.findall(str(href))  # digit runs in the href form the item code
        shop_price_01 = "".join(price)
        print("商品价格:" + shop_price_01)
        # BUG FIX: ``shop_title`` was declared global but never assigned (only
        # a local ``shop_title_01`` was set), so file_rename() crashed with a
        # NameError.  Assign the joined title to the global itself.
        shop_title = "".join(title)
        print("商品标题:" + shop_title)
        for index in href:
            href_shop = 'http:' + index
            print(href_shop)
        for num in number:
            # Return the first numeric token only.
            return num
        # file_rename()
    except Exception:  # narrowed from a bare except; still best-effort
        print('爬取失败')
def file_rename():
    # Rename the output artefacts on disk using the scraped product title.
    # NOTE(review): depends on the module global ``shop_title`` being set by
    # spider_home(); as written there it is declared global but never
    # assigned, so calling this raises NameError — confirm/fix upstream.
    file_srcFile = 'id.txt'
    file_dstFile = shop_title + '.txt'
    os.rename(file_srcFile, file_dstFile)  # rename the id dump after the title
    img_srcFile = 'ciyun.png'
    img_dstFile = shop_title + '.png'
    os.rename(img_srcFile, img_dstFile)  # rename the word-cloud image as well
if __name__ == '__main__':
    # Run the interactive scraper when executed as a script.
    spider_home()
| [
"fuyu16032001@gmail.com"
] | fuyu16032001@gmail.com |
39c457d336d955950a124abbfb9682486e1bbab8 | 90115eeb4d60c1dc26deb1c124d42039d214195c | /ixl/management/commands/createchallenges.py | dacb82b399b6bd7265dc6ed66a89f577c65727cc | [] | no_license | waffle-iron/Newton | ce60a8ccc66bbc23aa764742b197add4cfb4d2d3 | f8f3df4127e88428db0cc73207ac51582db7cd42 | refs/heads/master | 2021-01-02T08:59:39.948797 | 2017-08-02T12:52:52 | 2017-08-02T12:52:52 | 99,116,161 | 0 | 0 | null | 2017-08-02T12:52:52 | 2017-08-02T12:52:51 | null | UTF-8 | Python | false | false | 7,674 | py | # commands/createchallenges.py
# Full path to your django project directory
your_djangoproject_home="/home/alex/newton/"
import django
import datetime
import sys,os
import requests
from variables import second_teachers as assigned_teachers
from variables import mastery_skills, cbaExercises
sys.path.append(your_djangoproject_home)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "newton.settings")
django.setup()
from django.core.management.base import BaseCommand, CommandError
from brain.models import StudentRoster, CurrentClass, Teacher
from ixl.models import ChallengeAssignment, Challenge, ChallengeExercise, IXLSkillScores
from libs.functions import nwea_recommended_skills_list as nwea_skills
# Module-level date stamps, captured once when the command module is imported.
date = datetime.date.today()
date = date.strftime('%-m/%-d')  # e.g. "3/7"; %-m/%-d are glibc strftime extensions
todays_date = datetime.date.today()
class Command(BaseCommand):
    """Build and assign a personalized IXL challenge to every student.

    For each teacher listed in ``second_teachers`` the command walks the
    teacher's roster and creates one Challenge per student, composed of up
    to 1 mastery exercise, 3 total after the CBA section and 5 total after
    the NWEA section, then links it via ChallengeAssignment.
    """
    help = 'Assigns a custom challenge to all students'

    def add_arguments(self, parser):
        # This command takes no extra command-line arguments.
        pass

    def make_mastery_challenge(self, student, current_challenge, exercise_count):
        """Attach at most one mastery skill (required score 100) the student
        has not yet mastered (score < 96, or no score recorded)."""
        for addition in mastery_skills:
            try:
                skill_score = IXLSkillScores.objects.get(student_id=student, ixl_skill_id__skill_id=addition)
                if skill_score.score < 96:
                    exercise_count += 1
                    challenge_exercise = ChallengeExercise.objects.create(challenge=current_challenge,
                                                                          exercise_id=addition,
                                                                          required_score=100, )
            except Exception:
                # No score recorded for this skill yet: assign it anyway.
                exercise_count += 1
                challenge_exercise = ChallengeExercise.objects.create(challenge=current_challenge,
                                                                      exercise_id=addition,
                                                                      required_score=100)
            if exercise_count == 1:
                # The mastery section is capped at a single exercise.
                return exercise_count
        return exercise_count

    def make_cba_challenge(self, student, current_challenge, exercise_count):
        """Attach CBA exercises (required score 80) the student has not yet
        passed (score < 78, or no score recorded), until the running total
        reaches 3."""
        for addition in cbaExercises:
            try:
                skill_score = IXLSkillScores.objects.get(student_id=student, ixl_skill_id__skill_id=addition)
                if skill_score.score < 78:
                    exercise_count += 1
                    challenge_exercise = ChallengeExercise.objects.create(challenge=current_challenge,
                                                                          exercise_id=addition,
                                                                          required_score=80, )
                else:
                    print("Could not add {}".format(addition))
            except Exception:
                try:
                    challenge_exercise = ChallengeExercise.objects.create(challenge=current_challenge,
                                                                          exercise_id=addition,
                                                                          required_score=80)
                    exercise_count += 1
                except Exception:
                    # Already assigned (or creation failed): try the next one.
                    pass
            if exercise_count == 3:
                # Total so far (including the mastery section) is capped at 3.
                return exercise_count
        print("Ran out of cba exercises for {}!".format(student))
        return exercise_count

    def make_nwea_challenge(self, student, current_challenge, exercise_count):
        """Attach NWEA-recommended exercises until the running total reaches
        5, at most one per domain; same-domain overflow skills go on a
        waiting list used to top the challenge up afterwards."""
        skill_list = nwea_skills(student, "recommended_skill_list")
        domain_list = []
        waiting_list = []
        for skill in skill_list:
            previously_assigned = ChallengeExercise.objects.filter(challenge__challengeassignment__student_id=student, exercise_id=skill[0] )
            pal = len(previously_assigned)
            print("{} Previously Assigned {} times".format(skill[0],pal))
            if pal>3:
                # Skip skills already assigned more than three times.
                continue
            if skill[3] in domain_list:
                waiting_list.append(skill[0])
            elif exercise_count >=5:
                waiting_list.append(skill[0])
            else:
                domain_list.append(skill[3]) # Add this domain to the list
                # Create a Challenge Exercise object with the challenge and skill
                try:
                    challenge_exercise = ChallengeExercise.objects.create(challenge=current_challenge,
                                                                          exercise_id=skill[0])
                    exercise_count += 1
                except Exception:
                    continue
        if exercise_count<5:
            for skill in waiting_list:
                try:
                    # BUG FIX: waiting_list holds exercise-id strings, so pass
                    # the string itself — ``skill[0]`` was just its first
                    # character, creating bogus exercise ids.
                    challenge_exercise = ChallengeExercise.objects.create(challenge=current_challenge,
                                                                          exercise_id=skill)
                    exercise_count += 1
                except Exception:
                    continue
                if exercise_count ==5:
                    return exercise_count
        return exercise_count

    def handle(self, *args, **options):
        """Entry point: create and assign today's challenge for every student."""
        for teacher in assigned_teachers:
            try:  # Get the class
                current_class = CurrentClass.objects.get(teacher__last_name=teacher)
            except Exception:
                print('Teacher {} could not be found.'.format(teacher))
                # NOTE(review): ``break`` aborts the whole run on the first
                # missing teacher — confirm ``continue`` was not intended.
                break
            student_list = StudentRoster.objects.filter(current_class=current_class)
            print("Got student list. Creating Challenges.")
            for student in student_list:  # Go through one student at a time
                title = "{} {}'s {} Challenge".format(student.first_name, student.last_name[0],date)
                current_challenge = Challenge.objects.create(title=title, date=todays_date)
                exercise_count = 0
                exercise_count = self.make_mastery_challenge(student, current_challenge, exercise_count)
                exercise_count = self.make_cba_challenge(student, current_challenge, exercise_count)
                exercise_count = self.make_nwea_challenge(student, current_challenge, exercise_count)
                print("Assigning {} to {}, length: {}".format(title,student, exercise_count))
                obj, created = ChallengeAssignment.objects.get_or_create(
                    student_id=student, challenge=current_challenge,
                )
#TODO: Email teachers previous week's scores
# TODO: Add Bonus Exercises
# IXL Challenge Creation
# Create 5 main challenges
# 2 for CBA
# Map the CBAs to IXL Exercises for each of the three.
# Make it change depending on the date
# 2 for NWEA
# 1 for Mastery - based on the current or passed curriculum - 100 Smart Score
# Create 5 Bonus Challenges
from django.core.mail import EmailMessage
def send_an_email():
    """Send a fixed test message through Django's EmailMessage API."""
    email = EmailMessage(
        subject='Hello',
        body='''Body goes here.
How are you?
I hope this email works!''',
        from_email='newton@newtonthinks.com',
        to=['ins-dauaprqb@isnotspam.com'],
        # NOTE(review): 'gmail.com.com' looks like a typo'd reply-to domain —
        # confirm the intended address before relying on replies.
        reply_to=['alextrostbtwa@gmail.com.com'],
        headers={'Content-Type': 'text/plain'},
    )
    email.send()
def send_simple_message():
    """POST a canned test message to the Mailgun sandbox domain and return
    the ``requests.Response``."""
    # SECURITY: the Mailgun API key below is hard-coded in source; it should
    # be moved to configuration/an environment variable and the committed key
    # revoked.
    return requests.post(
        "https://api.mailgun.net/v3/sandbox791822b6aeca4aee8007134fecd331ec.mailgun.org/messages",
        auth=("api", "key-cedb9e331a1be78e57582e4e13cac442"),
        data={"from": "Mailgun Sandbox <postmaster@sandbox791822b6aeca4aee8007134fecd331ec.mailgun.org>",
              "to": "Alex <alextrostbtwa@gmail.com>",
              "subject": "Hello Alex",
              "text": "Congratulations Alex, you just sent an email with Mailgun! You are truly awesome!"})
send_simple_message() | [
"alexrtrost@gmail.com"
] | alexrtrost@gmail.com |
9b27acf8e7217a6d9531f6f8b2b0b06fc5734d47 | 282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19 | /Malware1/venv/Lib/site-packages/numpy/ma/core.py | 2a76d165ece3eafac172eaa1d54d982e3d5957f7 | [] | no_license | sameerakhtar/CyberSecurity | 9cfe58df98495eac6e4e2708e34e70b7e4c055d3 | 594973df27b4e1a43f8faba0140ce7d6c6618f93 | refs/heads/master | 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | version https://git-lfs.github.com/spec/v1
oid sha256:5152f7d1fc4275d76c9cc94b3a0702c65f9c6bff88aa6eaec8df101e7733707e
size 256431
| [
"46763165+sameerakhtar@users.noreply.github.com"
] | 46763165+sameerakhtar@users.noreply.github.com |
984cdfba99e9f183944bffc8080c34e4f54c0e66 | 94ed2113af11ba8b716fb959c5ac0a32c5549c18 | /templates/plexus/{project.name}/actions/About.py | cd853ead8059cee02158904d7f5a4fd013c94cd8 | [
"BSD-3-Clause"
] | permissive | avalentino/pyre | 85ba21388514dc8c206d5136760e23b39aba1cae | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | refs/heads/main | 2023-03-23T04:58:02.903369 | 2021-03-09T17:37:11 | 2021-03-09T17:37:11 | 347,723,195 | 0 | 0 | NOASSERTION | 2021-03-14T18:43:34 | 2021-03-14T18:43:33 | null | UTF-8 | Python | false | false | 4,184 | py | # -*- coding: utf-8 -*-
#
# {project.authors}
# {project.affiliations}
# (c) {project.span} all rights reserved
#
# externals
import {project.name}
# declaration
class About({project.name}.command, family='{project.name}.actions.about'):
    """
    Display information about this application
    """
    # NOTE: this file is a pyre project *template*: every {project.name}
    # below is a placeholder expanded when a project is generated, and
    # literal braces destined for str.format at runtime are doubled
    # ({{...}}) so they survive the template expansion.
    # user configurable state
    root = {project.name}.properties.str(default='/')
    root.tip = "specify the portion of the namespace to display"
    # commands
    @{project.name}.export(tip="the name of the app for configuration purposes")
    def name(self, plexus, **kwds):
        """
        Print the name of the app for configuration purposes
        """
        # show me
        plexus.info.log("{{!r}}".format(plexus.pyre_name) or "unknown")
        # all done
        return
    @{project.name}.export(tip="the application home directory")
    def home(self, plexus, **kwds):
        """
        Print the application home directory
        """
        # show me
        plexus.info.log("{{}}".format(plexus.home))
        # all done
        return
    @{project.name}.export(tip="the application installation directory")
    def prefix(self, plexus, **kwds):
        """
        Print the application installation directory
        """
        # show me
        plexus.info.log("{{}}".format(plexus.prefix))
        # all done
        return
    @{project.name}.export(tip="the application configuration directory")
    def defaults(self, plexus, **kwds):
        """
        Print the application configuration directory
        """
        # show me
        plexus.info.log("{{}}".format(plexus.defaults))
        # all done
        return
    @{project.name}.export(tip="print the version number")
    def version(self, plexus, **kwds):
        """
        Print the version of the {project.name} package
        """
        # make some space
        plexus.info.log({project.name}.meta.header)
        # all done
        return
    @{project.name}.export(tip="print the copyright note")
    def copyright(self, plexus, **kwds):
        """
        Print the copyright note of the {project.name} package
        """
        # show the copyright note
        plexus.info.log({project.name}.meta.copyright)
        # all done
        return
    @{project.name}.export(tip="print out the acknowledgments")
    def credits(self, plexus, **kwds):
        """
        Print out the license and terms of use of the {project.name} package
        """
        # NOTE(review): this logs meta.header (same as ``version``) although
        # the tip promises acknowledgments — likely a copy/paste; confirm
        # whether meta.acknowledgments was intended.
        # make some space
        plexus.info.log({project.name}.meta.header)
        # all done
        return
    @{project.name}.export(tip="print out the license and terms of use")
    def license(self, plexus, **kwds):
        """
        Print out the license and terms of use of the {project.name} package
        """
        # make some space
        plexus.info.log({project.name}.meta.license)
        # all done
        return
    @{project.name}.export(tip='dump the application configuration namespace')
    def nfs(self, plexus, **kwds):
        """
        Dump the application configuration namespace
        """
        # get the prefix
        prefix = self.root or '{project.name}'
        # show me
        plexus.pyre_nameserver.dump(prefix)
        # all done
        return
    @{project.name}.export(tip='dump the application private filesystem')
    def pfs(self, plexus, **kwds):
        """
        Dump the application private filesystem
        """
        # build the report
        report = '\n'.join(plexus.pfs.dump())
        # sign in
        plexus.info.line('pfs:')
        # dump
        plexus.info.log(report)
        # all done
        return
    @{project.name}.export(tip='dump the application virtual filesystem')
    def vfs(self, plexus, **kwds):
        """
        Dump the application virtual filesystem
        """
        # get the prefix
        prefix = self.root or '/{project.name}'
        # build the report
        report = '\n'.join(plexus.vfs[prefix].dump())
        # sign in
        plexus.info.line('vfs: root={{!r}}'.format(prefix))
        # dump
        plexus.info.log(report)
        # all done
        return
# end of file
| [
"michael.aivazis@orthologue.com"
] | michael.aivazis@orthologue.com |
f97683759d994ffa651b9fe04556b5fe7227fbec | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/90d8d0df916085db80d40220965daa4a3b7ba311-<test_qz_single>-bug.py | 2282f4798ce0ca3c2d1f5cf68bc695667672a3e3 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | def test_qz_single(self):
n = 5
A = random([n, n]).astype(float32)
B = random([n, n]).astype(float32)
(AA, BB, Q, Z) = qz(A, B)
assert_array_almost_equal(dot(dot(Q, AA), Z.T), A)
assert_array_almost_equal(dot(dot(Q, BB), Z.T), B)
assert_array_almost_equal(dot(Q, Q.T), eye(n))
assert_array_almost_equal(dot(Z, Z.T), eye(n))
assert_(all((diag(BB) >= 0))) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
32d24dea52ce64fed1b3599ceacbc71988bc0232 | 1310ca784c1b0b9238f2407eb59d0704b8ae5a08 | /NextGen/circuitpython/adafruit-circuitpython-bundle-6.x-mpy-20201114/examples/requests_simpletest_cellular.py | 6727815ba8df84aa15bd0a164f457469bad79927 | [] | no_license | RyannDaGreat/LightWave | 6b89838bfd48dba010eb5229b84b206be4e8ccbb | d055b0c01b01b3795d9e6c28b6b70f969893ed97 | refs/heads/master | 2023-07-20T08:23:47.526629 | 2023-07-18T00:25:02 | 2023-07-18T00:25:02 | 123,113,725 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,411 | py | # pylint: disable=unused-import
import time
import board
import busio
import digitalio
from adafruit_fona.adafruit_fona import FONA
from adafruit_fona.fona_3g import FONA3G
import adafruit_fona.adafruit_fona_network as network
import adafruit_fona.adafruit_fona_socket as cellular_socket
import adafruit_requests as requests
# Get GPRS details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("GPRS secrets are kept in secrets.py, please add them there!")
raise
# Create a serial connection for the FONA connection
uart = busio.UART(board.TX, board.RX)
rst = digitalio.DigitalInOut(board.D4)
# Use this for FONA800 and FONA808
fona = FONA(uart, rst)
# Use this for FONA3G
# fona = FONA3G(uart, rst)
# Initialize cellular data network
network = network.CELLULAR(
fona, (secrets["apn"], secrets["apn_username"], secrets["apn_password"])
)
while not network.is_attached:
print("Attaching to network...")
time.sleep(0.5)
print("Attached!")
while not network.is_connected:
print("Connecting to network...")
network.connect()
time.sleep(0.5)
print("Network Connected!")
# Initialize a requests object with a socket and cellular interface
requests.set_socket(cellular_socket, fona)
TEXT_URL = "http://wifitest.adafruit.com/testwifi/index.html"
JSON_GET_URL = "http://httpbin.org/get"
JSON_POST_URL = "http://httpbin.org/post"
print("Fetching text from %s" % TEXT_URL)
response = requests.get(TEXT_URL)
print("-" * 40)
print("Text Response: ", response.text)
print("-" * 40)
response.close()
print("Fetching JSON data from %s" % JSON_GET_URL)
response = requests.get(JSON_GET_URL)
print("-" * 40)
print("JSON Response: ", response.json())
print("-" * 40)
response.close()
data = "31F"
print("POSTing data to {0}: {1}".format(JSON_POST_URL, data))
response = requests.post(JSON_POST_URL, data=data)
print("-" * 40)
json_resp = response.json()
# Parse out the 'data' key from json_resp dict.
print("Data received from server:", json_resp["data"])
print("-" * 40)
response.close()
json_data = {"Date": "July 25, 2019"}
print("POSTing data to {0}: {1}".format(JSON_POST_URL, json_data))
response = requests.post(JSON_POST_URL, json=json_data)
print("-" * 40)
json_resp = response.json()
# Parse out the 'json' key from json_resp dict.
print("JSON Data received from server:", json_resp["json"])
print("-" * 40)
response.close()
| [
"sqrtryan@gmail.com"
] | sqrtryan@gmail.com |
31b20c33f85cf51e5f5a85fc2e154cd5e696c05c | 622a4baffb2c1e47aa9f1ac10eedeaf97e16c2a4 | /DataFreaksSchool/apps/school/admin.py | 0cb291f1c034da2523b31ef46444010cdfe6b23a | [] | no_license | Noeuclides/DataFreaks | 81bff087ee813bff4529245a27c09ea5ff6086d8 | 4739316223e31feffe5a020505727be983001be0 | refs/heads/master | 2023-04-27T18:06:49.788374 | 2020-02-05T00:50:36 | 2020-02-05T00:50:36 | 237,767,894 | 0 | 1 | null | 2023-04-21T20:47:07 | 2020-02-02T12:28:54 | Python | UTF-8 | Python | false | false | 269 | py | from django.contrib import admin
from .models import CustomUser, Student, Teacher, Course, Note
# Register your models here.
admin.site.register(CustomUser)
admin.site.register(Student)
admin.site.register(Teacher)
admin.site.register(Course)
admin.site.register(Note) | [
"euclidesnoeuclides@gmail.com"
] | euclidesnoeuclides@gmail.com |
8c5a37c5f4bd04e4bdee20dfee9587c03cbae32c | 3e05276c6562bbca2c46daec0bf30d765bb6c8d5 | /jobseeker/forms.py | 7ce3e3e0bb7a3ade0d8400cacafb8924bcf73ddf | [] | no_license | mrpal39/portflioWebapp | 762571a74979ddcd4abf90c8ab8684dcd2afa6fa | 898023c0b528557d4ab5ece6c48707f5e61ea296 | refs/heads/master | 2023-08-07T06:17:28.081727 | 2021-10-05T16:30:27 | 2021-10-05T16:30:27 | 414,057,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,814 | py |
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.db import models
from django.db.models import fields
from password_reset.forms import PasswordRecoveryForm, PasswordResetForm
from django import forms
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
from .models import Profile
from django.db import transaction
from django.contrib.auth.forms import UserCreationForm
class UserForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.fields['bio'].widget.attrs.update({"class":"form-control"})
self.fields['image'].widget.attrs.update({"class":"image"})
self.fields['email'].widget.attrs.update({"class":"form-control"})
self.fields['phone'].widget.attrs.update({"class":"form-control"})
self.fields['occupation'].widget.attrs.update({"class":"form-control"})
self.fields['social_account'].widget.attrs.update({"class":"form-control"})
self.fields['experence'].widget.attrs.update({"class":"form-control"})
self.fields['skills'].widget.attrs.update({"class":"form-control"})
self.fields['age'].widget.attrs.update({"class":"form-control"})
self.fields['status'].widget.attrs.update({"class":"form-control"})
self.fields['mobile'].widget.attrs.update({"class":"form-control"})
self.fields['gender'].widget.attrs.update({"class":"form-control"})
class ProfilUpdateForm(UserForm):
class Meta:
model = Profile
fields = (
'bio',
'image',
'email',
'phone',
'social_account',
'experence',
'skills',
'occupation',
'age',
'status',
'mobile',
'gender',
)
def __init__(self, *args, **kwargs):
super(ProfilUpdateForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit("submit", _("Save Changes")))
def save(self):
user = super().save(commit=False)
user.save()
return user
class ProfileForm(UserForm):
class Meta:
model = Profile
fields = (
'bio',
'experence',
'skills',
'age',
'status',
'mobile',
'gender',
)
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit("submit", _("Save Changes")))
def save(self):
user = super().save(commit=False)
user.save()
return user
| [
"rp9545416@gmail.com"
] | rp9545416@gmail.com |
b321ddea753491e4d4c5b8d334c61a4c303b5cd0 | 96148bf17555c028f5650d51f496f349c89e8c79 | /build/cob_driver/cob_utilities/catkin_generated/pkg.develspace.context.pc.py | a5e228897d169a8b0fdaa67c1507b846b574a730 | [] | no_license | kerekare/ros_hydra_libphidgetsupdated | 239daed94a95f60743c5659f1102183641761240 | e05e58417fb03a14d627bc80d09af3b2a0fcceab | refs/heads/master | 2016-09-05T23:35:43.792883 | 2014-03-25T16:32:01 | 2014-03-25T16:32:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kerekare/workspace/care-o-bot/src/cob_driver/cob_utilities/common/include".split(';') if "/home/kerekare/workspace/care-o-bot/src/cob_driver/cob_utilities/common/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcob_utilities".split(';') if "-lcob_utilities" != "" else []
PROJECT_NAME = "cob_utilities"
PROJECT_SPACE_DIR = "/home/kerekare/workspace/care-o-bot/devel"
PROJECT_VERSION = "0.5.0"
| [
"kerekare@i60sr2.(none)"
] | kerekare@i60sr2.(none) |
1519cd3690074f07ddfb744acb91bcd6f0e5a6a8 | 26e4bea46942b9afa5a00b9cde9a84f2cc58e3c9 | /pygame/Astar/pathfinding/Graph_old.py | bb30a06775a80a8af4ce9dfcaed88b9af6e2cc5d | [] | no_license | MeetLuck/works | 46da692138cb9741a913d84eff6822f107510dc7 | ab61175bb7e2ed5c5113bf150e0541ae18eb04c4 | refs/heads/master | 2020-04-12T05:40:25.143075 | 2017-08-21T17:01:06 | 2017-08-21T17:01:06 | 62,373,576 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,690 | py | from random import random
from colors import *
#bgcolor = lightgray
grid = [" * ",
" *** ",
" ",
"* ** ",
"* "]
def findNode(node,lst):
return node in lst
class Node:
def __init__(self):
self.adjacent = [] # UP,DOWN,LEFT,RIGHT(NWSE)
self.previous = None
self.label = ""
def clear(self):
self.previous = None
self.cost = 'Infinity'
def __str__(self):
return self.label
class Graph:
def __init__(self,grid):
self.rows = len(grid)
self.cols = len(grid[0])
self.createLabels()
self.createNodes()
def createLabels(self):
# create some labels
self.labels = list()
for i in range(65,91):
self.labels.append( chr(i) )
# first = ["",'A','B','C']
# for i,val in enumerate(first):
# prefix = first[i]
# for j in range(65,91):
# self.labels.append(prefix + chr(j))
def createNodes(self):
# create one node per square in the grid
self.nodes = list()
for i in range(self.rows*self.cols):
node = Node()
node.label = self.labels[i]
self.nodes.append(node)
# add edges to adjacent nodes
for r in range(self.rows):
for c in range(self.cols):
node = self.nodes[self.cols * r + c]
# ignore blocked squares
if grid[r][c] == '*': continue
# figure out the adjacent nodes
if r > 0 and grid[r-1][c] == ' ': # UP
node.adjacent.append(self.nodes[self.cols*(r-1) + c])
if r < self.rows-1 and grid[r+1][c] == ' ': # DOWN
node.adjacent.append(self.nodes[self.cols*(r+1) + c])
if c > 0 and grid[r][c-1] == ' ': # LEFT
node.adjacent.append(self.nodes[self.cols*r + c-1])
if c < self.cols-1 and grid[r][c+1] == ' ': # RIGHT
node.adjacent.append(self.nodes[self.cols*r + c+1])
def findNodeByLabel(self,label):
for i,val in enumerate(self.nodes):
if self.nodes[i].label == label:
return val
# Search
class Search:
def __init__(self, graph,start,goal):
self.graph = graph
self.reachable = list()
self.explored = list()
self.path = list()
self.start_label = start
self.goal_label = goal
def reset(self):
self.reachable = [ self.graph.findNodeByLabel(self.start_label) ]
self.goal = self.graph.findNodeByLabel(self.goal_label)
self.explored = list()
self.path = list()
self.iteration = 0
for i,node in enumerate(self.graph.nodes):
self.graph.nodes[i].clear()
#self.reachable[0].cost = 0
#self.render()
def step(self):
if len(self.path) > 0: # is the search already done ?
return
# if there are no more nodes to consider, we're done
if len(self.reachable) == 0:
self.finished = True
return
self.iteration += 1
# choose a node to examine next
node = self.chooseNode()
# are we done yet?
if node== self.goal:
while node:
self.path.append(node)
node = node.previous
print '------------- find path ----------------'
self.render()
return
# do not repeat
self.reachable.remove(node)
self.explored.append(node)
# where can we get from here?
# self.render()
# if node is None: return
for adjnode in node.adjacent:
self.addAdjacent(node,adjnode)
self.render()
def chooseNode(self):
return self.reachable[ int(random()* len(self.reachable)) ]
def addAdjacent(self,node,adjacent):
if findNode(adjacent,self.explored) or findNode(adjacent,self.reachable):
return
adjacent.previous = node
self.reachable.append(adjacent)
def render(self):
print '================== render =============='
print 'reachable ==>'
for rnode in self.reachable:
print rnode.label,
print
print 'explored ==>'
for enode in self.explored:
print enode.label,
print
print 'path ==>'
print self.path
if __name__ == '__main__':
g = Graph(grid)
search = Search(g,'A','T')
search.reset()
search.render()
for i in range(40):
search.step()
# search.step()
# print g.labels
# for node in g.nodes:
# print node
| [
"withpig1994@hanmail.net"
] | withpig1994@hanmail.net |
67473f9f435beec220fa067cf392d561ef7b110b | d8a766184f7d2e4379a9578b6bd01451f4434fd8 | /waynes_world/server.py | a8b013a456acdc8013c43b1f65b05507c9a6c9c7 | [] | no_license | YaoQ/zmq_examples | 1e9c386f3d8b51e04208bcededb8f64938a5200e | 631867073b79087c4bf94dff7ff3c57c113fc9a1 | refs/heads/master | 2020-11-30T13:03:08.111975 | 2011-06-03T21:16:59 | 2011-06-03T21:16:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | #!/usr/bin/env python
import zmq
import time
ctx = zmq.Context()
# one socket
s1 = ctx.socket(zmq.PUB)
s1.bind("tcp://127.0.0.1:5566")
# another
s2 = ctx.socket(zmq.PUB)
s2.bind("ipc://*:5567")
while True:
print 'Camera 1?'
s1.send("Camera 1")
time.sleep(1)
print 'Camera 2?'
s2.send("Camera 2")
time.sleep(1)
| [
"jd@j2labs.net"
] | jd@j2labs.net |
dcf370a1bd6932ff3c4ff9bb217104dc2ff6961a | 2fc849ee16732463779d4445954941538828879a | /source/webapp/migrations/0001_initial.py | 04cf199a27d95189a00f4255ac850a68e5143e58 | [] | no_license | Aisuluu1405/python_group_3_homework_48_Aisulu_Dzhusupova | 83da037fc9200a1a213fdf0eb5b09ed2febae79a | d4272516816fb83ff4d8a1c64645a508aecf37ee | refs/heads/master | 2023-05-02T16:13:24.948710 | 2019-09-23T12:30:14 | 2019-09-23T12:30:14 | 210,325,979 | 0 | 0 | null | 2023-04-21T20:38:01 | 2019-09-23T10:19:38 | Python | UTF-8 | Python | false | false | 1,036 | py | # Generated by Django 2.2 on 2019-09-20 05:09
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Product name')),
('description', models.TextField(blank=True, max_length=2000, null=True, verbose_name='Description')),
('category', models.CharField(choices=[('other', 'Other'), ('clothes', 'Clothes'), ('shoes', 'Shoes'), ('accessories', 'Accessories'), ('beauty', 'Beauty')], default='other', max_length=30, verbose_name='Category')),
('count', models.FloatField(verbose_name='Count')),
('price', models.DecimalField(decimal_places=2, max_digits=7, verbose_name='Price')),
],
),
]
| [
"aisuluueco2009@yandex.ru"
] | aisuluueco2009@yandex.ru |
e432d89a1b17d0c1572e3c78718079a42a30ce0d | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/encoding/encodings/muxings/packed_audio/customdata/customdata_api.py | d4a1fef29ca90072435cc6650196696f08976147 | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 1,484 | py | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.custom_data import CustomData
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
class CustomdataApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(CustomdataApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def get(self, encoding_id, muxing_id, **kwargs):
# type: (string_types, string_types, dict) -> CustomData
"""Packed Audio muxing Custom Data
:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param muxing_id: Id of the Packed Audio muxing
:type muxing_id: string_types, required
:return: Packed Audio muxing custom data
:rtype: CustomData
"""
return self.api_client.get(
'/encoding/encodings/{encoding_id}/muxings/packed-audio/{muxing_id}/customData',
path_params={'encoding_id': encoding_id, 'muxing_id': muxing_id},
type=CustomData,
**kwargs
)
| [
"openapi@bitmovin.com"
] | openapi@bitmovin.com |
f9552e5fb9ea367cd1fb32326b63cd871d695afb | 2b4668ba8ff74aa03d031786956c4d4802bfe02b | /util/samm_resolver.py | 5b5a982d47eede73c9a4c109120b5b22b97f4f1d | [
"BSD-3-Clause"
] | permissive | ioggstream/dsomm-orm | a8397ab6f73d46a0acfb8928ad7e835bef9b759a | 52c2040b1cb7263d568af548ab18acdcc3700292 | refs/heads/main | 2023-07-12T21:52:52.988267 | 2021-08-13T22:56:30 | 2021-08-13T22:56:30 | 360,102,120 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | #
# Parses yaml files contained in https://github.com/OWASP/samm/tree/master/Supporting%20Resources/v2.0/Datamodel/Datafiles
#
from pathlib import Path
import yaml
def read_yaml(f):
return yaml.safe_load(Path(f).read_text())
class SammResolver:
def __init__(self, basepath="."):
self.basepath = Path(basepath)
self.functions = [
f.name[:-4].split(" ")[1] for f in self.basepath.glob("Function *yml")
]
self.functions_map = {x[0]: x for x in self.functions}
self.practices_map = self._parse_practices()
self.streams = self._parse_streams()
self.activities = list(
set([f.name[:-4].split(" ")[1] for f in self.basepath.glob("Activit*.yml")])
)
def _parse_practices(self):
practices = {}
for f in self.basepath.glob("Practice *yml"):
p = read_yaml(f)
if "shortDescription" not in p:
continue
practices[p["shortName"]] = p["name"]
return practices
def _parse_streams(self):
streams = {}
for f in self.basepath.glob("Stream *yml"):
s = read_yaml(f)
s_id = f.name[7:-4]
s_name = s["name"]
streams[s_id] = s_name
return streams
def parse_activity(self, a):
function, practice, maturity, stream = a.split("-")
stream_id = f"{function}-{practice}-{stream}"
return {
"id": a,
"function": self.functions_map[function],
"practice": self.practices_map[practice],
"maturity": maturity,
"stream": self.streams[stream_id],
}
def test_parse_activities():
fpath = "downloads/Datafiles"
samm = SammResolver(fpath)
for a in samm.activities:
print(samm.parse_activity(a))
def test_samm_to_csv():
fpath = "downloads/Datafiles"
samm = SammResolver(fpath)
import pandas as pd
df = pd.DataFrame([samm.parse_activity(a) for a in samm.activities])
df.to_csv("samm_activities.csv")
from sqlalchemy import create_engine
engine = create_engine("mysql+mysqlconnector://root:root@127.0.0.1/dsomm")
df.to_sql("samm", con=engine)
| [
"robipolli@gmail.com"
] | robipolli@gmail.com |
dca2aacac1572c1ca6111998fa21040bca5af015 | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories_2to3/208513/kaggle-liberty-hazard-prediction-master/models/neighbors.py | 72c4ced2d31ec580b44e987575d068641d85f2de | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,990 | py | # import pandas as pd
# import numpy as np
# import pickle
# from sklearn import preprocessing
# from sklearn.cross_validation import train_test_split
# from sklearn.grid_search import RandomizedSearchCV
# from sklearn.neighbors import KNeighborsRegressor
# from sklearn.neighbors import KNeighborsClassifier
# ##################################################################################
# # cal metric
# def gini(solution, submission):
# df = zip(solution, submission, range(len(solution)))
# df = sorted(df, key=lambda x: (x[1],-x[2]), reverse=True)
# rand = [float(i+1)/float(len(df)) for i in range(len(df))]
# totalPos = float(sum([x[0] for x in df]))
# cumPosFound = [df[0][0]]
# for i in range(1,len(df)):
# cumPosFound.append(cumPosFound[len(cumPosFound)-1] + df[i][0])
# Lorentz = [float(x)/totalPos for x in cumPosFound]
# Gini = [Lorentz[i]-rand[i] for i in range(len(df))]
# return sum(Gini)
# def normalized_gini(solution, submission):
# normalized_gini = gini(solution, submission)/gini(solution, solution)
# return normalized_gini
# ##################################################################################
# #load train and test
# train = pd.read_csv('./data/train.csv', index_col=0)
# test = pd.read_csv('./data/test.csv', index_col=0)
# train_y = train.Hazard
# # drop train_y -> train_y
# train.drop('Hazard', axis=1, inplace=True)
# # drop noisy features
# train.drop('T2_V10', axis=1, inplace=True)
# train.drop('T2_V7', axis=1, inplace=True)
# train.drop('T1_V13', axis=1, inplace=True)
# train.drop('T1_V10', axis=1, inplace=True)
# test.drop('T2_V10', axis=1, inplace=True)
# test.drop('T2_V7', axis=1, inplace=True)
# test.drop('T1_V13', axis=1, inplace=True)
# test.drop('T1_V10', axis=1, inplace=True)
# # columns and index for later use
# columns = train.columns
# test_ind = test.index
# train = np.array(train)
# test = np.array(test)
# # label encode the categorical variables
# for i in range(train.shape[1]):
# lbl = preprocessing.LabelEncoder()
# lbl.fit(list(train[:,i]) + list(test[:,i]))
# train[:,i] = lbl.transform(train[:,i])
# test[:,i] = lbl.transform(test[:,i])
# train = train.astype(np.float32)
# test = test.astype(np.float32)
# ##################################################################################
# with open('./data/train_denoise.vec', 'rb') as f:
# train = pickle.load(f)
# with open('./data/test_denoise.vec', 'rb') as f:
# test = pickle.load(f)
# with open('./data/train_y.vec', 'rb') as f:
# train_y = pickle.load(f)
# train_x_sp, test_x_sp, train_y_sp, test_y_sp = train_test_split(train, train_y, train_size=0.8, random_state=50)
# rgrs = KNeighborsRegressor(n_neighbors =100)
# rgrs.fit(train_x_sp, train_y_sp)
# pred = rgrs.predict(test_x_sp)
# score = normalized_gini(test_y_sp, pred)
# print '{:.6f}'.format(score)
| [
"keesiu.wong@gmail.com"
] | keesiu.wong@gmail.com |
24454c08c7717f816db15594fcbc51f9901da313 | c81377ee1e27d00f797fcf2ad68317ba42429ca5 | /LIZA_DAMIAN_CARLOS/PARA/bucle_para01.py | bf3b86c64c3c7a11735a0aed52813614fb771917 | [] | no_license | CARLOSC10/T07_LIZA.DAMIAN_ROJAS.CUBAS | 39c55f08a1178b611125979741a3538276fa5d40 | ad736fb83de76f6342f8d53b4b5acfe1ecc88b7f | refs/heads/master | 2020-09-16T14:24:40.571198 | 2019-12-19T23:41:21 | 2019-12-19T23:41:21 | 223,798,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | #REPETITIVAS "PARA" QUE MUESTRA LOS N PRIMEROS NUMEROS, N SE ESCRIBE POR TECLADO
import os
n=0
#ARGUMENTOS
n=int(os.sys.argv[1])
#INPUT VALIDA LOS DATOS
datos_incorectos=(n<0)
#WHILE
#MIESTRAS LOS DATOS SEAN INCORECTOS A LA CONDICION ENTRA EN WHILE
while(datos_incorectos==True):
n=int(input("DATOS INGRESADOS INVALIDOS:Ingrese nuevamente los datos:"))
datos_incorectos=(n<0)
#fin_while
print("FIN DEL BUCLE")
#PROCESSING DE LA ESTRUCTURA "PARA"
i=0
while(i<=n):
print(i)
i+=1
#fin_while
| [
"clizad@unprg.edu.pe"
] | clizad@unprg.edu.pe |
fcba8acddc2b4aef40d9b982a26b8447898804b1 | 86fcd7e56f7409dc05fb1cc07496a38e39ef2607 | /vispy/testing/tests/test_testing.py | 8877a55354fc9b791914fe58aadeaef4537d012d | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] | permissive | alexjc/vispy | a1622f7920df5f0ddd11acf56302896fabd5cb37 | 2e528cf3915c8274848d9f3662809485f3dbcf3f | refs/heads/master | 2021-01-15T08:57:01.427072 | 2014-12-08T20:02:11 | 2014-12-08T20:02:11 | 26,930,099 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from nose.tools import assert_raises
from vispy.testing import (assert_in, assert_not_in, assert_is,
run_tests_if_main)
def test_testing():
"""Test testing ports"""
assert_raises(AssertionError, assert_in, 'foo', 'bar')
assert_in('foo', 'foobar')
assert_raises(AssertionError, assert_not_in, 'foo', 'foobar')
assert_not_in('foo', 'bar')
assert_raises(AssertionError, assert_is, None, 0)
assert_is(None, None)
run_tests_if_main()
| [
"larson.eric.d@gmail.com"
] | larson.eric.d@gmail.com |
760cb2fd39f86a9b39f87005d16dbdd0b0dc1846 | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/water/java_level_reason/lot/president.py | 0dc9e9aba884377bf07220292becfd6d869e08ff | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | # -*- coding: utf-8 -*-
import http.client, urllib.parse
# **********************************************
# *** Update or verify the following values. ***
# **********************************************
# Replace the subscriptionKey string value with your valid subscription key.
host = 'api.microsofttranslator.com'
path = '/V2/Http.svc/TranslateArray'
params = ''
ns = "http://schemas.microsoft.com/2003/10/Serialization/Arrays";
# NOTE: AppId is required, but it can be empty because we are sending the Ocp-Apim-Subscription-Key header.
body = """
<TranslateArrayRequest>
<AppId />
<Texts>
<string xmlns=\"%s\">Hello</string>
<string xmlns=\"%s\">Goodbye</string>
</Texts>
<To>fr-fr</To>
</TranslateArrayRequest>
""" % (ns, ns)
def TranslateArray ():
subscriptionKey = '95de3c2322800cad9803e2d338616d8b'
headers = {
'd3320239539e8ef417a1646394d0703b': subscriptionKey,
'Content-type': 'text/xml'
}
conn = http.client.HTTPSConnection(host)
conn.request ("POST", path + params, body, headers)
response = conn.getresponse ()
return response.read ()
result = TranslateArray ()
print (result.decode("utf-8"))
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
9b1be2255716dd81859aaf4f32c15e6443397f0a | a33a5d7d2a9b0b6030f39803553f2689f3d2c743 | /ml/vcwiz/training/trainer.py | 4024824cb15983e56849955a17f6b33eeebf0686 | [] | no_license | yasyf/vc | 6c1a4224d56049aff44f2ffa1c57922ccb7907ab | bd12d8b1248b008516b1547a693008428085de78 | refs/heads/master | 2023-01-10T08:39:34.285545 | 2019-07-29T10:20:31 | 2019-07-29T14:58:33 | 59,700,244 | 11 | 4 | null | 2023-01-09T20:04:34 | 2016-05-25T21:52:18 | Ruby | UTF-8 | Python | false | false | 1,826 | py | from abc import ABC, abstractmethod
import os, sys, tempfile
from google.cloud import storage
class Trainer(ABC):
def __init__(self):
self.model = None
self.output_path = None
self.upload_path = None
self.client = storage.Client(os.environ['GC_PROJECT_ID'])
self.bucket = self.client.bucket(os.environ['GOOGLE_MODEL_BUCKET'])
@abstractmethod
def _train(self, filename):
raise NotImplementedError
def train(self, filename):
self.model = self._train(filename)
def remote_train(self, path):
fd, filename = tempfile.mkstemp(suffix='.csv')
blob = self.bucket.blob(path)
blob.download_to_filename(filename)
self.train(filename)
os.close(fd)
@abstractmethod
def _save(self, model, path):
raise NotImplementedError
def save(self, path):
assert self.model
self._save(self.model, path)
self.output_path = path
def upload(self, path):
assert self.output_path
blob = self.bucket.blob(path)
blob.upload_from_filename(self.output_path)
self.upload_path = path
return blob.generation
def remote_save(self, path):
fd, filename = tempfile.mkstemp(suffix='.model')
self.save(filename)
os.close(fd)
return self.upload(path)
@abstractmethod
def _test(self, model, *args):
raise NotImplementedError
def test(self, *args):
assert self.model
self._test(self.model, *args)
@abstractmethod
def _metrics(self, model, *args):
raise NotImplementedError
def metrics(self, *args):
return self._metrics(self.model, *args)
@classmethod
def _train_and_test(cls, filename, args):
instance = cls()
instance.train(filename)
instance.test(*args)
@classmethod
def train_and_test(cls):
filename = sys.argv[1]
args = sys.argv[2:]
cls._train_and_test(filename, args)
| [
"yasyfm@gmail.com"
] | yasyfm@gmail.com |
b679b28b5411bf945455ca4c62ff77c700dcf922 | 84b05857cbe74d190bdbee18d442d0c720b1b84d | /AlgoExpert_algorithms/Easy/FindThreeLargestNumbers/test_FindThreeLargestNumbersd.py | 1ab120e3c5754042ad18d7723c6535b5dafe7308 | [] | no_license | JakubKazimierski/PythonPortfolio | 1c8c7e7b0f1358fc42a2295b807d0afafd8e88a3 | 3aa62ad36c3b06b2a3b05f1f8e2a9e21d68b371f | refs/heads/master | 2023-06-01T01:16:22.897097 | 2023-05-15T01:05:22 | 2023-05-15T01:05:22 | 311,473,524 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | '''
Unittests for FindThreeLargestNumbers.py
January 2021 Jakub Kazimierski
'''
import unittest
import FindThreeLargestNumbers
class test_FindThreeLargestNumbers(unittest.TestCase):
'''
Class with unittests for FindThreeLargestNumbers.py
'''
# region Unittests
def test_ExpectedOutput(self):
'''
Checks if returned output is as expected.
'''
input_arr = [10, 5, 9, 10, 12]
output = FindThreeLargestNumbers.findThreeLargestNumbers(input_arr)
self.assertEqual(output, [10, 10, 12])
# endregion
if __name__ == "__main__":
'''
Main method for test cases.
'''
unittest.main() | [
"j.m.kazimierski@gmail.com"
] | j.m.kazimierski@gmail.com |
36bf83381d9d12a2a2f73d94138ec08698d8c928 | d4f579219d0d557973e6b3d6392d887081825dc3 | /PythonNTF/T1/Naloge/logicnioperatorji.py | 0777d56b1679e3b60086c901e99582547c3a29d6 | [] | no_license | aljazvaupotic/Python-Course | 1eb841cc407105c6e14bdb49445d85484de9c6d9 | d1df7b1a357fef5fbc3cccea83fd5adec25e3edf | refs/heads/master | 2023-08-25T09:05:24.258495 | 2021-11-08T10:00:31 | 2021-11-08T10:00:31 | 186,800,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,481 | py | #______________________________________________________________________________
# Termin 1, 16.5.2019
# Logični operatorji
#______________________________________________________________________________
# Logični operator *konjunkcija* ima naslednjo resničnostno tabelo, kjer
# `F` predstavlja neresnično (`False`), `T` pa resnično (`True`) vrednost:
#
# A B | A /\ B
# -----+-------
# F F | F
# F T | F
# T F | F
# T T | T
#
# S pomočjo vgrajenega operatorja `and` enostavno sestavimo funkcijo
# `konjunkcija(a, b)`, ki sprejme logični vrednosti `a` in `b` ter vrne logično
# vrednost konjunkcije `a /\ b`:
#
# def konjunkcija(a, b):
# return a and b
# =====================================================================
# 1. podnaloga
# Logični operator *disjunkcija* ima naslednjo resničnostno tabelo:
#
# A B | A \/ B
# -----+-------
# F F | F
# F T | T
# T F | T
# T T | T
#
# Sestavite funkcijo `disjunkcija(a, b)`, ki sprejme logični vrednosti
# `a` in `b` ter vrne logično vrednost disjunkcije `a \/ b`. Pri tem si
# pomagajte z vgrajenim operatorjem `or`.
# =============================================================================
# =====================================================================
# 2. podnaloga
# Logični operator *negacija* ima naslednjo resničnostno tabelo:
#
# A | ~A
# --+----
# F | T
# T | F
#
# Sestavite funkcijo `negacija(a)`, ki vrne logično vrednost disjunkcije `~a`.
# =============================================================================
# =====================================================================
# 3. podnaloga
# Logični operator *implikacija* ima naslednjo resničnostno tabelo:
#
# A B | A => B
# -----+-------
# F F | T
# F T | T
# T F | F
# T T | T
#
# Sestavite funkcijo `implikacija(a, b)`, ki vrne logično vrednost
# implikacije `a => b`.
# =============================================================================
# =====================================================================
# 4. podnaloga
# Logični operator *ekvivalenca* ima naslednjo resničnostno tabelo:
#
# A B | A <=> B
# -----+--------
# F F | T
# F T | F
# T F | F
# T T | T
#
# Sestavite funkcijo `ekvivalenca(a, b)`, ki vrne logično vrednost ekvivalence
# `a <=> b`.
#
# Namig: Pomagajte si lahko s funkcijo `implikacija`.
# =============================================================================
# =====================================================================
# 5. podnaloga
# Logični operator *ekskluzivni ali* (*exclusive or* ali XOR) ima naslednjo
# resničnostno tabelo:
#
# A B | A XOR B
# -----+--------
# F F | F
# F T | T
# T F | T
# T T | F
#
# Sestavite funkcijo `xor(a, b)`, ki vrne logično vrednost `a XOR b`.
# =============================================================================
# =====================================================================
# 6. podnaloga
# Logični operator *NAND* (*not and*) ima naslednjo
# resničnostno tabelo:
#
# A B | A NAND B
# -----+---------
# F F | T
# F T | T
# T F | T
# T T | F
#
# Sestavite funkcijo `nand(a, b)`, ki vrne logično vrednost `a NAND b`.
# =============================================================================
| [
"noreply@github.com"
] | aljazvaupotic.noreply@github.com |
b2cff122cc6e2e3b7f77f15c0931ccbb7b0bffc9 | 3a4fbde06794da1ec4c778055dcc5586eec4b7d2 | /code-samples/coursera-17.py | 6b9f38101bcabf8e7262bb9c084391cdcce921fd | [] | no_license | raychorn/svn_python-django-projects | 27b3f367303d6254af55c645ea003276a5807798 | df0d90c72d482b8a1e1b87e484d7ad991248ecc8 | refs/heads/main | 2022-12-30T20:36:25.884400 | 2020-10-15T21:52:32 | 2020-10-15T21:52:32 | 304,455,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import io
def create_python_script(filename):
    """Create a new Python script containing a header comment.

    Writes a single comment line to *filename* (truncating any existing
    file) and returns the size of the resulting file in bytes.
    The original opened the file twice (once to write, once to seek to
    the end); a single write plus ``os.path.getsize`` is simpler.
    """
    import os

    comments = "# Start of a new Python program"
    with open(filename, "w") as script:
        script.write(comments)
    return os.path.getsize(filename)
print(create_python_script("program.py"))
| [
"raychorn@gmail.com"
] | raychorn@gmail.com |
2311c794062db12ee14f68625930ee7ec4fc5dd9 | 34a9c26849b3d82318c5d50df1474776e96afc58 | /scheduler/learning_rate/cosine_lr.py | 6de33f7ea0771db16fc3b300dee812194a968e42 | [
"MIT"
] | permissive | vcowwy/CvT_paddle | 483ef210e9864b254f45e556571c686409512afe | de8c28fbbc83e2c6c2479d44971020b15e7b12ec | refs/heads/master | 2023-08-31T08:49:19.237186 | 2021-11-02T09:13:43 | 2021-11-02T09:13:43 | 423,333,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,760 | py | import logging
import math
import numpy as np
import paddle
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class CosineLRScheduler(Scheduler):
    """Cosine-annealing learning-rate schedule with restarts and warmup.

    Each cycle decays the LR from the base value down to ``lr_min``
    following a half cosine; cycle length may grow geometrically by
    ``t_mul`` and the peak LR may shrink by ``decay_rate`` per cycle.
    An optional linear warmup ramps from ``warmup_lr_init`` to the base
    values over ``warmup_t`` steps.  ``t`` is interpreted as epochs when
    ``t_in_epochs`` is True, otherwise as update steps.
    """

    def __init__(self,
                 optimizer: paddle.optimizer.Optimizer,
                 t_initial: int,
                 t_mul: float = 1.0,
                 lr_min: float = 0.0,
                 decay_rate: float = 1.0,
                 warmup_t=0,
                 warmup_lr_init=0,
                 warmup_prefix=False,
                 cycle_limit=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True) -> None:
        # Noise-related arguments are handled entirely by the base class.
        super().__init__(optimizer,
                         param_group_field='lr',
                         noise_range_t=noise_range_t,
                         noise_pct=noise_pct,
                         noise_std=noise_std,
                         noise_seed=noise_seed,
                         initialize=initialize)

        assert t_initial > 0
        assert lr_min >= 0
        if t_initial == 1 and t_mul == 1 and decay_rate == 1:
            _logger.warning('Cosine annealing scheduler will have no effect on the learning rate since t_initial = t_mul = eta_mul = 1.')
        self.t_initial = t_initial
        self.t_mul = t_mul
        self.lr_min = lr_min
        self.decay_rate = decay_rate
        self.cycle_limit = cycle_limit
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Per-group linear increment so each group reaches its own
            # base value exactly at t == warmup_t.
            self.warmup_steps = [((v - warmup_lr_init) / self.warmup_t) for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t):
        """Return the list of per-group learning rates at step/epoch ``t``."""
        if t < self.warmup_t:
            # Linear warmup phase.
            lrs = [(self.warmup_lr_init + t * s) for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                # Warmup does not count towards the cosine timeline.
                t = t - self.warmup_t

            if self.t_mul != 1:
                # Geometric cycle lengths: solve for the cycle index i,
                # the current cycle length t_i and the offset t_curr.
                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
                t_i = self.t_mul ** i * self.t_initial
                t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
            else:
                # Constant cycle length.
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - self.t_initial * i

            # Shrink both the peak and the floor by decay_rate per cycle.
            gamma = self.decay_rate ** i
            lr_min = self.lr_min * gamma
            lr_max_values = [(v * gamma) for v in self.base_values]

            if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
                # Standard half-cosine interpolation between lr_max and lr_min.
                lrs = [(lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i))) for lr_max in lr_max_values]
            else:
                # Past the last allowed cycle: hold at the (undecayed) floor.
                lrs = [self.lr_min for _ in self.base_values]
        return lrs

    def get_epoch_values(self, epoch: int):
        """LRs for an epoch boundary, or None when stepping per update."""
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None

    def get_update_values(self, num_updates: int):
        """LRs for an update step, or None when stepping per epoch."""
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None

    def get_cycle_length(self, cycles=0):
        """Total steps covered by ``cycles`` cycles (default: cycle_limit)."""
        if not cycles:
            cycles = self.cycle_limit
        cycles = max(1, cycles)
        if self.t_mul == 1.0:
            return self.t_initial * cycles
        else:
            # Geometric series sum of cycle lengths.
            return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
| [
"1719870799@qq.com"
] | 1719870799@qq.com |
8b7099feb3ee046dd8adee97b3da106d2a3c6379 | 9644572133b4cde92745a6c2320069bce926f715 | /general_ocr/datasets/utils/parser.py | c6908ad99250785edfad0034ed46512b122f2d78 | [] | no_license | hnhoangdz/general_ocr | b79306f8078556cdc83690d1d5e19baff30dc878 | 8975731cbc7065aa1825bf857c33b90ad0140c49 | refs/heads/main | 2023-08-16T09:41:56.444851 | 2021-10-19T09:09:44 | 2021-10-19T09:09:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,272 | py | # Copyright (c) GeneralOCR. All rights reserved.
import json
from general_ocr.datasets.builder import PARSERS
from general_ocr.utils import StringStrip
@PARSERS.register_module()
class LineStrParser:
    """Split one annotation line on a separator and map fields to keys.

    Args:
        keys (list[str]): Keys in the result dict.
        keys_idx (list[int]): Index in the split line for each key above.
        separator (str): Delimiter used to split the line.
    """

    def __init__(self,
                 keys=['filename', 'text'],
                 keys_idx=[0, 1],
                 separator=' ',
                 **kwargs):
        assert isinstance(keys, list)
        assert isinstance(keys_idx, list)
        assert isinstance(separator, str)
        assert len(keys) > 0
        assert len(keys) == len(keys_idx)
        self.keys = keys
        self.keys_idx = keys_idx
        self.separator = separator
        self.strip_cls = StringStrip(**kwargs)

    def get_item(self, data_ret, index):
        """Return the parsed dict for entry ``index`` (wraps around)."""
        wrapped = index % len(data_ret)
        fields = self.strip_cls(data_ret[wrapped]).split(self.separator)
        if len(fields) <= max(self.keys_idx):
            raise Exception(
                f'key index: {max(self.keys_idx)} out of range: {fields}')
        return {key: fields[idx]
                for key, idx in zip(self.keys, self.keys_idx)}
@PARSERS.register_module()
class LineJsonParser:
    """Parse one json-line of an annotation file into a dict.

    Args:
        keys (list[str]): Keys required in both the json line and the result.
    """

    def __init__(self, keys=[]):
        assert isinstance(keys, list)
        assert len(keys) > 0
        self.keys = keys

    def get_item(self, data_ret, index):
        """Return the selected keys of entry ``index`` (wraps around)."""
        obj = json.loads(data_ret[index % len(data_ret)])
        info = {}
        for key in self.keys:
            if key not in obj:
                raise Exception(f'key {key} not in line json {obj}')
            info[key] = obj[key]
        return info
| [
"towarddatascience@gmail.com"
] | towarddatascience@gmail.com |
2a8c8c68da8aa31cf3e069ae3d86603d00d5ec27 | 9bb7bc13aad5d822f52b0f52e31a468faa964f22 | /lcdb/helpers.py | 18cd06a08d7c1a5bf783781640739cf536a2516e | [
"MIT"
] | permissive | lcdb/lcdb-workflows | 03e9f8a5d887ac23059304d98f8abafe83644708 | ee28a42bc6021b8b82f1950144cda6e841823661 | refs/heads/master | 2021-01-21T15:07:25.121526 | 2019-04-12T21:30:51 | 2019-04-12T21:30:51 | 58,685,798 | 1 | 1 | null | 2016-08-01T13:52:35 | 2016-05-13T00:11:01 | Python | UTF-8 | Python | false | false | 2,947 | py | import os
import pandas
import yaml
from jsonschema import validate, ValidationError
from snakemake.shell import shell
def validate_config(config, schema):
    """Validate a YAML config file against a YAML jsonschema file.

    Parameters
    ----------
    config : str
        Path to the YAML config file to validate.
    schema : str
        Path to the YAML file holding the jsonschema.

    Raises
    ------
    ValidationError
        If the config does not conform to the schema.
    """
    # Context managers close the file handles (the original leaked them),
    # and safe_load prevents arbitrary-object construction from YAML tags.
    with open(schema) as schema_file:
        schema_dict = yaml.safe_load(schema_file)
    with open(config) as config_file:
        cfg = yaml.safe_load(config_file)
    try:
        validate(cfg, schema_dict)
    except ValidationError as e:
        msg = '\nPlease fix %s: %s\n' % (config, e.message)
        raise ValidationError(msg)
def build_wrapper_for(source_dir, wrappers_dir):
    """
    Returns a `wrapper_for` function to be used in a workflow.

    Parameters
    ----------
    :source_dir: str
        Directory of the calling snakemake workflow. Typically this is
        obtained with the srcdir() built-in.
    :wrappers_dir: str
        Directory of wrappers relative to source dir
    """
    base = os.path.join(source_dir, wrappers_dir)

    def wrapper_for(tool):
        return os.path.join(base, tool)

    return wrapper_for
def build_params_for(config):
    """
    Returns a `params_for` function to be used in a workflow.

    Parameters
    ----------
    :config: dict
        The global config dictionary from a workflow
    """
    def params_for(rule, key):
        # Missing levels fall through to the empty string.
        rule_cfg = config.get('rules', {}).get(rule, {})
        return rule_cfg.get('params', {}).get(key, '')

    return params_for
def build_threads_for(config):
    """
    Returns a `threads_for` function to be used in a workflow.

    Parameters
    ----------
    :config: dict
        The global config dictionary from a workflow
    """
    def threads_for(rule):
        # None signals "no thread count configured for this rule".
        rule_cfg = config.get('rules', {}).get(rule, {})
        return rule_cfg.get('threads', None)

    return threads_for
def workflow_helper_functions(config, source_dir, wrappers_dir):
    """
    One-stop-shop for building helper functions.

    Parameters
    ----------
    :config: dict
        The global config dictionary from a workflow
    :source_dir: str
        Directory of the calling snakemake workflow. Typically this is
        obtained with the srcdir() built-in.
    :wrappers_dir: str
        Directory of wrappers relative to source dir

    Returns
    -------
    wrapper_for, params_for, and threads_for functions.
    """
    wrapper_for = build_wrapper_for(source_dir, wrappers_dir)
    params_for = build_params_for(config)
    threads_for = build_threads_for(config)
    return wrapper_for, params_for, threads_for
def load_sampletable(filename):
    """
    Load sampletable; the first column becomes the index.

    TODO: validation will go here.
    """
    # read_table is read_csv with a tab separator; spell that out explicitly.
    return pandas.read_csv(filename, sep='\t', index_col=0)
def rscript(string, scriptname, log=None):
    """
    Saves the string as `scriptname` and then runs it with Rscript.

    Parameters
    ----------
    string : str
        Filled-in template to be written as R script
    scriptname : str
        File to save script to
    log : str
        File to redirect stdout and stderr to. If None, no redirection occurs.
    """
    with open(scriptname, 'w') as script_file:
        script_file.write(string)
    # NOTE: snakemake's shell() interpolates {scriptname} and {_log} from
    # the caller's locals, so these names must not change.
    _log = '> {0} 2>&1'.format(log) if log else ""
    shell('Rscript {scriptname} {_log}')
| [
"dalerr@niddk.nih.gov"
] | dalerr@niddk.nih.gov |
8ea00d6cb58f877c3542b77e7c1d3bd1ffa1d98e | 40eea049f7e9cef38f30da90b9dd38f840eab240 | /nvij-cliprm-ec362ad713de/cliprm/backend/crawlers/shaze/shaze/spiders/shaze_spider.py | de0f74731fa3f8eaacb3ca2c07c6359ec3237ec2 | [] | no_license | pratikpoddar/clipr | ddf9a6e6ca9e50e84cd7dcc36ae732876e019da6 | a84a88d8a6eb76836b0bef4b6531919a563b1345 | refs/heads/master | 2021-05-27T21:53:03.844876 | 2014-07-18T10:52:41 | 2014-07-18T10:52:41 | 21,977,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py | import sys
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from shaze.items import CliprItem
visitedIds = set()
def isProduct(url):
    """Return True when *url* points at a product page ('/p/' past index 0)."""
    return '/p/' in url[1:]
def getId(url):
    """Extract the numeric product id that follows '/p/' in *url*."""
    after_marker = url.split('/p/')[1]
    return int(after_marker.split('/')[0])
class DmozSpider(CrawlSpider):
    """Crawler for shaze.in; every followed page is handed to parse_item."""
    name = "shaze"
    allowed_domains = ["shaze.in"]
    start_urls = [
        "http://www.shaze.in"
    ]

    rules = (
        # Extract links matching '/p/' for products and '/c/' for categories.
        Rule(SgmlLinkExtractor(allow=('/p/','/c/')), callback='parse_item',follow=True),
    )

    def parse_item(self, response):
        # Delegate to the module-level parser(), which dedupes by product id.
        return parser(response)
def parser(response):
    """Scrape a shaze.in page into a list of CliprItem.

    Non-product pages yield an empty list; product pages already seen
    (tracked by the module-level ``visitedIds`` set) also yield [].
    NOTE(review): calls sys.exit() when the breadcrumb is missing, which
    kills the whole crawl on one malformed page — confirm this is intended.
    """
    link = response.url
    if (not isProduct(link)):
        return []
    else:
        prodId = getId(link)
    items = []
    if prodId not in visitedIds:
        # Mutates module-level state so each product is emitted only once.
        visitedIds.add(prodId)
        site = HtmlXPathSelector(response)
        item = CliprItem()
        item['link'] = response.url
        item['siteId'] = 'shaze'
        # Breadcrumb: drop empty entries and the "You are in Shop" crumb;
        # the last remaining crumb is the product title.
        category = site.select('//div[@class="breadcrumb"]/a/text()').extract()
        category = filter(lambda x:x!="", category)
        category = filter(lambda x:x.find("You are in Shop")<0, category)
        if len(category) <= 1:
            print "Error: breadcrumb not found"
            sys.exit()
        item['title'] = category[len(category) - 1 ]
        item['category'] = category[:len(category)-1]
        item['price'] = trimPrice(site.select('//span[@class="productPrice"]/text()').extract()[0])
        # No separate list price on this site; mirror the selling price.
        item['markprice'] = item['price']
        item['image'] = site.select('//div[@class="picture"]/img/@src').extract()[0]
        item['description'] = site.select('//div[@class="overview"]/h2[@class="productnameOverview"]/text()').extract()[0]
        item['buylink'] = item['link']
        item['recid'] = ""
        items.append(item)
    return items
def trimPrice(price):
    """Normalise a price string such as 'Rs 1,250.00' to the integer 1250."""
    cleaned = price.replace("Rs", '')
    # Order matters: drop the '.00' paise suffix before stripping stray dots.
    for token in (".00", ".", "/", ","):
        cleaned = cleaned.replace(token, '')
    return int(cleaned.strip())
| [
"pratik.phodu@gmail.com"
] | pratik.phodu@gmail.com |
075137d16c4ea177b032134a2f40c97cd6d7c5ce | 438e546e2acf5aa57c34c6481e477f7025b12e21 | /Grokking Coding Interview/P6 - In Place Reversal of LL /Reverse K Sized Sub-List.py | 2dbe7f39ba36d836a47c76fe3b67fd7d1e61a5e3 | [] | no_license | SajinKowserSK/algorithms-practice | 988537ef3537487cb40c78776dd2c9e1130cde4f | 41bbd55553747492a539b41f6e86bff5504c5842 | refs/heads/master | 2022-11-06T18:22:41.329484 | 2022-10-19T23:40:10 | 2022-10-19T23:40:10 | 206,470,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | from __future__ import print_function
class Node:
    """A singly linked list node holding a value and a next pointer."""

    def __init__(self, value, next=None):
        self.value = value
        self.next = next

    def print_list(self):
        """Print all values from this node onward on one line."""
        node = self
        while node is not None:
            print(node.value, end=" ")
            node = node.next
        print()
def reverse_every_k_elements(head, k):
    """Reverse every consecutive group of ``k`` nodes in a linked list.

    A trailing group shorter than ``k`` is reversed as well, matching the
    original algorithm's behaviour. Returns the new head.

    Fixes crashes in the original implementation: when the list length was
    an exact multiple of ``k``, ``curr`` became None and the loop condition
    ``curr.next`` raised AttributeError; when the list was shorter than
    ``k``, ``lastTail``/``start`` were never initialised.
    """
    def _reverse(node):
        # Standard in-place reversal; returns the new head of the sub-list.
        prev = None
        while node:
            node.next, prev, node = prev, node, node.next
        return prev

    if head is None or k <= 1:
        return head

    new_head = head
    last_tail = None      # tail of the previously reversed group
    group_head = head     # first node of the group currently being scanned
    curr = head
    count = 1
    while curr is not None and curr.next is not None:
        curr = curr.next
        count += 1
        if count % k == 0:
            rest = curr.next
            curr.next = None              # detach the complete group
            reversed_head = _reverse(group_head)
            if last_tail is None:
                new_head = reversed_head
            else:
                last_tail.next = reversed_head
            # After reversal the old group head is that group's tail.
            last_tail = group_head
            group_head = rest
            curr = rest
            count = 1
    # Reverse any leftover partial group (fewer than k nodes).
    if group_head is not None:
        reversed_head = _reverse(group_head)
        if last_tail is None:
            new_head = reversed_head
        else:
            last_tail.next = reversed_head
    return new_head
def reverse(head):
    """Reverse a singly linked list in place and return the new head."""
    prev = None
    while head is not None:
        # Re-point the current node backwards and advance in one step.
        head.next, prev, head = prev, head, head.next
    return prev
def main():
    """Build the list 1..8, reverse it in groups of 3, and print both."""
    head = Node(1)
    head.next = Node(2)
    head.next.next = Node(3)
    head.next.next.next = Node(4)
    head.next.next.next.next = Node(5)
    head.next.next.next.next.next = Node(6)
    head.next.next.next.next.next.next = Node(7)
    head.next.next.next.next.next.next.next = Node(8)

    print("Nodes of original LinkedList are: ", end='')
    head.print_list()
    result = reverse_every_k_elements(head, 3)
    print("Nodes of reversed LinkedList are: ", end='')
    result.print_list()


# Runs on import as well as direct execution (no __main__ guard).
main()
| [
"sajinkowser@gmail.com"
] | sajinkowser@gmail.com |
ab45a62f3cdff764191fa10661d1c3a0d52c4b51 | e129fe32194ad8d15f664cd055062d01caae370f | /tools/betterbib-format | c9fd4371c24e59f8a6cbfba8255c52059f2ba91c | [
"MIT"
] | permissive | tbabej/betterbib | dd3b6895d3fd8ff4cf50b4b8e5fdcd2fb6d31216 | 80a3c9040232d9988f9a1e4c40724b40b9b9ed85 | refs/heads/master | 2020-03-11T10:45:14.243594 | 2018-04-17T18:43:30 | 2018-04-18T13:37:11 | 129,950,668 | 0 | 0 | MIT | 2018-04-17T18:49:03 | 2018-04-17T18:49:02 | null | UTF-8 | Python | false | false | 3,331 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
from __future__ import print_function, unicode_literals
import argparse
import collections
import sys
from pybtex.database.input import bibtex
import betterbib
def _main():
    """Parse CLI args, reformat the input BibTeX, and write the output."""
    args = _parse_cmd_arguments()

    data = bibtex.Parser().parse_file(args.infile)

    # Use an ordered dictionary to make sure that the entries are written out
    # sorted by their BibTeX key if demanded.
    od = betterbib.decode(collections.OrderedDict(
        sorted(data.entries.items())
        if args.sort_by_bibkey
        else data.entries.items()
    ))

    # Rewrite DOI URLs according to --doi-url-type before writing out.
    od = _adapt_doi_urls(od, args.doi_url_type)

    betterbib.write(
        od, args.outfile, args.delimeter_type, tab_indent=args.tabs_indent
    )
    return
def _adapt_doi_urls(od, doi_url_type):
    """Rewrite DOI URLs in all entries according to *doi_url_type*.

    'new' uses https://doi.org/<DOI>, 'short' resolves a shortDOI, and
    'unchanged' leaves everything as-is.
    """
    if doi_url_type == 'new':
        return _update_doi_url(od, lambda doi: 'https://doi.org/' + doi)

    if doi_url_type == 'short':
        def update_to_short_doi(doi):
            # Only rewrite when the shortDOI lookup succeeds.
            short_doi = betterbib.tools.get_short_doi(doi)
            return 'https://doi.org/' + short_doi if short_doi else None
        return _update_doi_url(od, update_to_short_doi)

    assert doi_url_type == 'unchanged'
    return od
def _update_doi_url(od, url_from_doi):
    """Apply *url_from_doi* to every entry whose URL contains a DOI.

    Entries without a 'url' field, without a recognizable DOI, or for
    which *url_from_doi* returns a falsy value are left untouched.
    """
    for entry in od.values():
        if 'url' not in entry.fields:
            continue
        doi = betterbib.tools.doi_from_url(entry.fields['url'])
        if not doi:
            continue
        new_url = url_from_doi(doi)
        if new_url:
            entry.fields['url'] = new_url
    return od
def _parse_cmd_arguments():
    """Define and parse the command-line interface; returns the namespace."""
    parser = argparse.ArgumentParser(description='Reformat BibTeX files.')
    parser.add_argument(
        '-v', '--version',
        help='display version information',
        action='version',
        version='betterbib {}, Python {}'.format(
            betterbib.__version__, sys.version
        )
    )
    # Positional in/out files default to stdin/stdout so the tool can be
    # used as a filter in a pipeline.
    parser.add_argument(
        'infile',
        nargs='?',
        type=argparse.FileType('r'),
        default=sys.stdin,
        help='input BibTeX file (default: stdin)'
    )
    parser.add_argument(
        'outfile',
        nargs='?',
        type=argparse.FileType('w'),
        default=sys.stdout,
        help='output BibTeX file (default: stdout)'
    )
    parser.add_argument(
        '-b', '--sort-by-bibkey',
        action='store_true',
        help='sort entries by BibTeX key (default: false)'
    )
    parser.add_argument(
        '-t', '--tabs-indent',
        action='store_true',
        help='use tabs for indentation (default: false)'
    )
    parser.add_argument(
        '-d', '--delimeter-type',
        choices=[
            'braces',
            'quotes',
        ],
        default='braces',
        help=(
            'which delimeters to use in the output file '
            '(default: braces {...})'
        ),
    )
    parser.add_argument(
        '-u', '--doi-url-type',
        choices=[
            'unchanged',
            'new',
            'short'
        ],
        default='new',
        help=(
            'DOI URL (new: https://doi.org/<DOI> (default), '
            'short: https://doi.org/abcde)'
        ),
    )
    return parser.parse_args()
if __name__ == '__main__':
_main()
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com | |
f30497be13679db52d80358c553ceabc0ae00929 | 17f1811abda6c828460b77f460671f9c2f464204 | /leetcode/shuffle_an_array.py | 2bcc987387591d1047a0a4164e7da89c024cb1c8 | [] | no_license | rishabhranawat/challenge | f10f69fc30881a0571c4321b466a89aeeb06e568 | e836343be5185f8843bb77197fccff250e9a77e3 | refs/heads/master | 2021-01-21T15:13:47.590675 | 2020-04-25T15:26:42 | 2020-04-25T15:26:42 | 91,833,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | # Checkout Fisher Yates
import random
class Solution(object):
    """Shuffle an Array (LeetCode 384).

    Keeps an untouched snapshot of the initial configuration so ``reset``
    can restore it, and uses the Fisher-Yates shuffle (``random.shuffle``)
    for an unbiased O(n) permutation. The previous rejection-sampling
    implementation looped forever when *nums* contained duplicate values
    and was quadratic even on distinct values.
    """

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        self.nums = nums
        # Immutable snapshot of the original ordering for reset().
        self._original = list(nums)

    def reset(self):
        """
        Resets the array to its original configuration and return it.
        :rtype: List[int]
        """
        self.nums = list(self._original)
        return self.nums

    def shuffle(self):
        """
        Returns a random shuffling of the array.
        :rtype: List[int]
        """
        shuffled = list(self._original)
        random.shuffle(shuffled)
        self.nums = shuffled
        return shuffled
# Your Solution object will be instantiated and called as such:
nums = [1, 2, 3]
obj = Solution(nums)
param_1 = obj.reset()
print(param_1)
param_2 = obj.shuffle()
print(param_2)
param_1 = obj.reset()
print(param_1) | [
"rishabhranawat12345@gmail.com"
] | rishabhranawat12345@gmail.com |
812d42b9f9e83081cc0bd88c2d1b6b5dcec3a3ab | 85e078ee3ceda5091624233ca19ba42f78747499 | /LeetCode/buy_sell_stock2.py | f9fc5f75d3de157ae7c519c73459053fbf61088c | [] | no_license | papayetoo/StudyinPython | d5e6ec0cff0e97fcc4afc8d846e3658c06eb67c2 | f686b6e08720ad4d7d57b41d24c63c4bfa64dd90 | refs/heads/master | 2021-07-22T04:05:38.993123 | 2021-02-03T14:12:26 | 2021-02-03T14:12:26 | 240,009,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | class Solution:
def buySellStock(self, prices: [int]) -> int:
if len(prices) == 0:
return 0
profit = 0
pair_prices = [(index, value) for index, value in enumerate(prices)]
descending = sorted(pair_prices,
key=lambda x: x[1],
reverse=True)
ascending = sorted(pair_prices,
key=lambda x: x[1])
for d in descending:
tmp = 0
for a in ascending:
if d[0] >= a[0]:
continue
else:
if tmp < a[1] - d[1]:
tmp = a[1] - d[1]
print(tmp)
profit += tmp
return profit
if __name__ == '__main__':
prices = [1, 2, 3, 4, 5]
s = Solution()
print(s.buySellStock(prices))
| [
"rhkdgus0826@gmail.com"
] | rhkdgus0826@gmail.com |
742cf9975339908a3a686a400bc4f2e1c2447a7a | e845f7f61ff76b3c0b8f4d8fd98f6192e48d542a | /djangocg/contrib/gis/geometry/test_data.py | 1ae4fe529d1427c026130348b99afec6eac57b91 | [
"BSD-3-Clause"
] | permissive | timothyclemans/djangocg | fd150c028013cb5f53f5a3b4fdc960a07fdaaa78 | 52cf28e046523bceb5d436f8e6bf61e7d4ba6312 | refs/heads/master | 2021-01-18T13:20:13.636812 | 2012-08-31T23:38:14 | 2012-08-31T23:38:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,000 | py | """
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import gzip
import json
import os
from djangocg.contrib import gis
from djangocg.utils import six
# This global used to store reference geometry data.
GEOMETRIES = None
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(gis.__file__), 'tests', 'data')
def tuplize(seq):
    "Turn all nested sequences to tuples in given sequence."
    if not isinstance(seq, (list, tuple)):
        # Scalars (including strings) pass through unchanged.
        return seq
    return tuple(tuplize(item) for item in seq)
def strconvert(d):
    "Converts all keys in dictionary to str type."
    # six.iteritems is unnecessary here: dict.items() iterates on both
    # Python 2 and 3, and a dict comprehension is clearer than dict([...]).
    return {str(k): v for k, v in d.items()}
def get_ds_file(name, ext):
    """Return the path of the data file ``name.ext`` under TEST_DATA/name/."""
    filename = '%s.%s' % (name, ext)
    return os.path.join(TEST_DATA, name, filename)
class TestObj(object):
    """
    Base testing object, turns keyword args into attributes.
    """
    def __init__(self, **kwargs):
        # Equivalent to setattr() in a loop for plain attribute storage.
        self.__dict__.update(kwargs)
class TestDS(TestObj):
    """
    Object for testing GDAL data sources.
    """
    def __init__(self, name, **kwargs):
        # Shapefile is default extension, unless specified otherwise.
        ext = kwargs.pop('ext', 'shp')
        # Resolve the on-disk path of the data source file.
        self.ds = get_ds_file(name, ext)
        # Remaining keyword args become plain attributes (see TestObj).
        super(TestDS, self).__init__(**kwargs)
class TestGeom(TestObj):
    """
    Testing object used for wrapping reference geometry data
    in GEOS/GDAL tests.
    """
    def __init__(self, **kwargs):
        # Converting lists to tuples of certain keyword args
        # so coordinate test cases will match (JSON has no
        # concept of tuple).
        coords = kwargs.pop('coords', None)
        if coords:
            self.coords = tuplize(coords)

        centroid = kwargs.pop('centroid', None)
        if centroid:
            self.centroid = tuple(centroid)

        # Exterior-ring coordinate sequence; may legitimately be None.
        ext_ring_cs = kwargs.pop('ext_ring_cs', None)
        if ext_ring_cs:
            ext_ring_cs = tuplize(ext_ring_cs)
        self.ext_ring_cs = ext_ring_cs

        super(TestGeom, self).__init__(**kwargs)
class TestGeomSet(object):
    """
    Each attribute of this object is a list of `TestGeom` instances.
    """
    def __init__(self, **kwargs):
        for name, geom_dicts in kwargs.items():
            geoms = [TestGeom(**strconvert(kw)) for kw in geom_dicts]
            setattr(self, name, geoms)
setattr(self, key, [TestGeom(**strconvert(kw)) for kw in value])
class TestDataMixin(object):
    """
    Mixin used for GEOS/GDAL test cases that defines a `geometries`
    property, which returns and/or loads the reference geometry data.
    """
    @property
    def geometries(self):
        # Module-level cache: the gzipped JSON fixture is parsed only once
        # per process, no matter how many test cases use this mixin.
        global GEOMETRIES
        if GEOMETRIES is None:
            # Load up the test geometry data from fixture into global.
            gzf = gzip.GzipFile(os.path.join(TEST_DATA, 'geometries.json.gz'))
            geometries = json.loads(gzf.read())
            GEOMETRIES = TestGeomSet(**strconvert(geometries))
        return GEOMETRIES
| [
"timothy.clemans@gmail.com"
] | timothy.clemans@gmail.com |
0038fd3149b3996e6eb7ac75db588001245eb691 | 2ea1fdf72317649c698105be8d84935c55007db0 | /npr_sfs/methods/ibme.py | ad09d7f2eb3c64720ad17bf007bc6edc5ebe989d | [
"MIT"
] | permissive | joepfortunato/NPR-SFS | 25987b5eda4203473059dda1cabdbbb68ecbbf29 | 15d9fd2b83d75214fa851aafcc17f970252dad32 | refs/heads/master | 2021-05-30T02:48:56.252831 | 2015-10-06T07:41:53 | 2015-10-06T07:41:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,904 | py | # -*- coding: utf-8 -*-
## @package npr_sfs.methods.ibme
#
# Image-Based Material Editing [Kahn et al. 2006].
# @author tody
# @date 2015/07/30
"""Usage: ibme.py [<input>] [-h] [-o] [-q]
<input> Input image.
-h --help Show this help.
-o --output Save output files. [default: False]
-q --quiet No GUI. [default: False]
"""
from docopt import docopt
import numpy as np
import cv2
import matplotlib.pyplot as plt
from npr_sfs.datasets.loader import dataFile
from npr_sfs.io_util.image import loadRGBA, saveNormal
from npr_sfs.cv.image import luminance, alpha
from npr_sfs.plot.window import showMaximize
from npr_sfs.cv.normal import normalizeImage, normalToColor
from npr_sfs.util.logger import getLogger
logger = getLogger(__name__)
def computeGradientNormals(D_32F, sigma=5.0):
    """Convert a depth map into a unit-normal map via smoothed gradients.

    ``D_32F`` is a 2D (h, w) depth image; returns an (h, w, 3) normal map.
    ``sigma`` controls Gaussian smoothing of the Sobel gradients.
    """
    h, w = D_32F.shape
    # Smoothed partial derivatives of depth in x and y.
    gx = cv2.Sobel(D_32F, cv2.CV_64F, 1, 0, ksize=1)
    gx = cv2.GaussianBlur(gx, (0, 0), sigma)

    gy = cv2.Sobel(D_32F, cv2.CV_64F, 0, 1, ksize=1)
    gy = cv2.GaussianBlur(gy, (0, 0), sigma)

    # Per-pixel tangent (1, 0, gx) and bitangent (0, 1, -gy) vectors.
    T_32F = np.zeros((h, w, 3), dtype=np.float32)
    T_32F[:, :, 0] = 1.0
    T_32F[:, :, 2] = gx

    B_32F = np.zeros((h, w, 3), dtype=np.float32)
    B_32F[:, :, 1] = 1.0
    B_32F[:, :, 2] = -gy

    # The normal is the cross product of tangent and bitangent.
    T_flat = T_32F.reshape(-1, 3)
    B_flat = B_32F.reshape(-1, 3)

    N_flat = np.cross(T_flat, B_flat)
    N_32F = N_flat.reshape(h, w, 3)
    N_32F = normalizeImage(N_32F)
    return N_32F
def depthRecovery(I_32F, sigma_range=0.1, sigma_space=10,
                  w_base=0.9, w_detail=0.1):
    """Recover a pseudo-depth map from luminance via bilateral decomposition.

    The image is split into a bilateral-filtered base layer and a residual
    detail layer, then recombined with the given weights.
    """
    base = cv2.bilateralFilter(I_32F, -1, sigma_range, sigma_space)
    detail = I_32F - base
    return w_base * base + w_detail * detail
def estimateNormal(I_32F):
    """Estimate a normal map from luminance; also returns the depth used."""
    D_32F = depthRecovery(I_32F)
    return computeGradientNormals(D_32F), D_32F
def showResult(C_8U, D_32F, N_32F, A_8U):
    """Display the input color, recovered depth, and estimated normal maps."""
    logger.info("showResult")
    plt.subplot(131)
    plt.title('Original Color')
    plt.imshow(C_8U)

    plt.subplot(132)
    plt.title('Depth')
    plt.imshow(D_32F, cmap=plt.cm.gray)

    plt.subplot(133)
    plt.title('Estimated Normal')
    # Normals are mapped to RGB, masked by the alpha channel.
    plt.imshow(normalToColor(N_32F, A_8U))

    showMaximize()
def saveResult(input_file, A_8U, N_32F):
    """Write the estimated normal map next to the input as ``*_N.png``."""
    logger.info("saveResult")
    normal_file = input_file.replace(".png", "_N.png")
    saveNormal(normal_file, N_32F, A_8U)
def main(input_file, output_file, quiet):
    """Run normal estimation on *input_file*; optionally save and display."""
    C_8U = loadRGBA(input_file)
    A_8U = alpha(C_8U)
    I_32F = luminance(C_8U)

    N_32F, D_32F = estimateNormal(I_32F)

    # output_file is a boolean flag: save next to the input when set.
    if output_file:
        saveResult(input_file, A_8U, N_32F)

    if quiet:
        return

    showResult(C_8U, D_32F, N_32F, A_8U)
if __name__ == '__main__':
args = docopt(__doc__)
if args['<input>']:
input_file = args['<input>']
else:
input_file = dataFile("ThreeBox")
output_file = args['--output']
quiet = args['--quiet']
main(input_file, output_file, quiet) | [
"tody411@gmail.com"
] | tody411@gmail.com |
454f3d60787fdac730e072981d6438e2503218bf | aef0a344e13f6a10f7145e8cd63a514adaa2f5a7 | /tb/irq_rate_limit/test_irq_rate_limit.py | ec8e60e9a36e56a4fd41e43d2460a8fb901e93d5 | [
"MIT"
] | permissive | alexforencich/verilog-pcie | a0ff59662e2d9cac100295b43a9b4ad374bcd406 | 75126f133318b31f226ae13ebc46a40eb52cf3ac | refs/heads/master | 2023-07-20T01:19:06.004282 | 2023-06-24T05:38:06 | 2023-06-24T05:38:06 | 164,569,208 | 765 | 223 | MIT | 2023-07-18T08:36:17 | 2019-01-08T05:28:51 | Verilog | UTF-8 | Python | false | false | 5,159 | py | #!/usr/bin/env python
"""
Copyright (c) 2022 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, Timer
from cocotb.regression import TestFactory
from cocotbext.axi.stream import define_stream
IrqBus, IrqTransaction, IrqSource, IrqSink, IrqMonitor = define_stream("Irq",
signals=["index", "valid", "ready"]
)
class TB(object):
    """Cocotb testbench harness for the irq_rate_limit DUT.

    Starts a 4 ns clock, attaches an IRQ stream source/sink to the DUT's
    in/out interfaces, and zeroes the rate-limit configuration inputs.
    """
    def __init__(self, dut):
        self.dut = dut

        self.log = logging.getLogger("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.start_soon(Clock(dut.clk, 4, units="ns").start())

        self.irq_source = IrqSource(IrqBus.from_prefix(dut, "in_irq"), dut.clk, dut.rst)
        self.irq_sink = IrqSink(IrqBus.from_prefix(dut, "out_irq"), dut.clk, dut.rst)

        dut.prescale.setimmediatevalue(0)
        dut.min_interval.setimmediatevalue(0)

    def set_idle_generator(self, generator=None):
        """Optionally pause the IRQ source according to *generator*."""
        if generator:
            self.irq_source.set_pause_generator(generator())

    def set_backpressure_generator(self, generator=None):
        """Optionally stall the IRQ sink according to *generator*."""
        if generator:
            self.irq_sink.set_pause_generator(generator())

    async def cycle_reset(self):
        """Pulse the DUT reset for two clock cycles."""
        self.dut.rst.setimmediatevalue(0)
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst.value = 1
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst.value = 0
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
async def run_test_irq(dut, idle_inserter=None, backpressure_inserter=None):
    """Exercise the rate limiter: single-shot IRQs pass through once;
    repeated IRQs are coalesced until ``min_interval`` elapses."""
    tb = TB(dut)
    await tb.cycle_reset()
    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)
    # prescale=249 with a 4 ns clock presumably yields a 1 us tick, making
    # min_interval=100 a 100 us window -- TODO confirm against the RTL.
    dut.prescale.setimmediatevalue(249)
    dut.min_interval.setimmediatevalue(100)
    tb.log.info("Test interrupts (single shot)")
    for k in range(8):
        await tb.irq_source.send(IrqTransaction(index=k))
    for k in range(8):
        irq = await tb.irq_sink.recv()
        tb.log.info(irq)
        assert irq.index == k
    # No further IRQs should appear even after the interval passes.
    assert tb.irq_sink.empty()
    await Timer(110, 'us')
    assert tb.irq_sink.empty()
    tb.log.info("Test interrupts (multiple)")
    # Send each index 5 times: one immediate delivery, then one deferred
    # delivery once the rate-limit interval expires.
    for n in range(5):
        for k in range(8):
            await tb.irq_source.send(IrqTransaction(index=k))
    for k in range(8):
        irq = await tb.irq_sink.recv()
        tb.log.info(irq)
        assert irq.index == k
    assert tb.irq_sink.empty()
    await Timer(99, 'us')
    # Still inside the interval: nothing delivered yet.
    assert tb.irq_sink.empty()
    await Timer(11, 'us')
    # Interval expired: the coalesced repeats are now delivered.
    assert not tb.irq_sink.empty()
    for k in range(8):
        irq = await tb.irq_sink.recv()
        tb.log.info(irq)
        assert irq.index == k
    assert tb.irq_sink.empty()
    await Timer(110, 'us')
    assert tb.irq_sink.empty()
    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
def cycle_pause():
    """Endless pause pattern for stream helpers: three paused cycles, one active."""
    pattern = [1, 1, 1, 0]
    return itertools.cycle(pattern)
# Only build the regression matrix when running inside a simulator
# (cocotb.SIM_NAME is set by the simulator entry point).
if cocotb.SIM_NAME:
    for test in [
                run_test_irq
            ]:
        # Cross each test with optional idle/backpressure pause patterns.
        factory = TestFactory(test)
        factory.add_option("idle_inserter", [None, cycle_pause])
        factory.add_option("backpressure_inserter", [None, cycle_pause])
        factory.generate_tests()
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
def test_irq_rate_limit(request):
    """pytest entry point: compile the DUT and run the cocotb testbench above."""
    dut = "irq_rate_limit"
    # This module is re-imported by the simulator as the cocotb test module.
    module = os.path.splitext(os.path.basename(__file__))[0]
    toplevel = dut
    verilog_sources = [
        os.path.join(rtl_dir, f"{dut}.v"),
    ]
    parameters = {}
    parameters['IRQ_INDEX_WIDTH'] = 11
    # Parameters are forwarded to the simulator via PARAM_* env vars.
    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
    # Per-test build directory (strip pytest's [] from parametrized names).
    sim_build = os.path.join(tests_dir, "sim_build",
        request.node.name.replace('[', '-').replace(']', ''))
    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )
| [
"alex@alexforencich.com"
] | alex@alexforencich.com |
bdc6890d12b3d567090dc600624a8c446f355672 | cbafab192b5072b8e9150dcada8013503af40fca | /Django_Learning/admin/app01/migrations/0001_initial.py | 7eeda195e315bb1b00276f9051dfd29b6e1748aa | [] | no_license | lafitehhq/PythonProject | 928421b49ff0ea9fd536ca7769a04fe990848929 | d5d0352541a29ee070884263e7eb50160cd7b3b5 | refs/heads/master | 2021-09-06T01:48:42.971720 | 2018-02-01T11:56:50 | 2018-02-01T11:56:50 | 106,712,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,267 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-14 06:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for app01 (Author, AuthorDetail, Book,
    Book2Author, Publisher). Do not edit by hand; create a new migration
    instead."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='AuthorDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sex', models.BooleanField(choices=[(0, '男'), (1, '女')], max_length=1)),
                ('email', models.EmailField(max_length=254)),
                ('address', models.CharField(max_length=50)),
                ('birthday', models.DateField()),
                ('author', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='app01.Author')),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('price', models.DecimalField(decimal_places=2, default=10, max_digits=5)),
                ('publication_date', models.DateField()),
                ('authors', models.ManyToManyField(to='app01.Author')),
            ],
        ),
        migrations.CreateModel(
            name='Book2Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Author')),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Book')),
            ],
        ),
        migrations.CreateModel(
            name='Publisher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='名称')),
                ('address', models.CharField(max_length=50, verbose_name='地址')),
                ('city', models.CharField(max_length=60, verbose_name='城市')),
                ('state_province', models.CharField(max_length=30)),
                ('country', models.CharField(max_length=50)),
                ('website', models.URLField()),
            ],
            options={
                'verbose_name_plural': '出版商',
                'verbose_name': '出版商',
            },
        ),
        migrations.AddField(
            model_name='book',
            name='publisher',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Publisher'),
        ),
        migrations.AlterUniqueTogether(
            name='book2author',
            unique_together=set([('author', 'book')]),
        ),
    ]
| [
"lafitehhq@126.com"
] | lafitehhq@126.com |
90b561a17cc041f5c24dc06b96de9a60de196e92 | dff5c14ce2ce94b1170c4e31b985bc23c25c72a6 | /CLASS 3/2606: 바이러스/solution.py | 88c05d9a1e3256315bd812da4654ae4a23ac2806 | [
"MIT"
] | permissive | coco-in-bluemoon/baekjoon-online-judge | 371c6afb66467d2afd28bc315afc5109fa3bd8cc | 06e14fe89e4ec5b940f2afa20bc5e4b0de08c8f6 | refs/heads/main | 2023-02-15T11:51:38.631843 | 2021-01-08T15:33:17 | 2021-01-08T15:33:17 | 302,237,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | from collections import deque
def solution(num_node, edges):
    """Return the number of computers infected from computer 1 (excluding 1).

    Nodes are numbered 1..num_node; *edges* is an iterable of [a, b] pairs
    describing an undirected network. BFS from node 1 counts every other
    reachable node.
    """
    START_NODE = 1
    adjacency = {node: set() for node in range(1, num_node + 1)}
    for a, b in edges:
        adjacency[a].add(b)
        adjacency[b].add(a)
    seen = {START_NODE}
    frontier = deque([START_NODE])
    infected = 0
    while frontier:
        current = frontier.popleft()
        for neighbor in adjacency[current]:
            if neighbor in seen:
                continue
            seen.add(neighbor)
            infected += 1
            frontier.append(neighbor)
    return infected
if __name__ == "__main__":
    # Input format (Baekjoon 2606): node count, edge count, then one
    # "src dst" pair per line.
    num_node = int(input())
    num_edge = int(input())
    edges = list()
    for _ in range(num_edge):
        src, dst = map(int, input().split())
        edges.append([src, dst])
    answer = solution(num_node, edges)
    print(answer)
| [
"coco.in.bluemoon@gmail.com"
] | coco.in.bluemoon@gmail.com |
70feee5ad2d1a2ce6a9b66799514a767ef8dce50 | c5758c1f4c880f4530df1a5ffb4c30ee2da445ee | /pytracking/vot_ep/sk3x3/vot_wrapper_sk3x3_ep0031.py | 5753454ed67494b9ff345fda9c48e1174ac294a5 | [] | no_license | bfjei2825401/d3s | 6d662fc301181a0e3ad831b0db6111e3cf8f4097 | 32140a3c67252f0e98cbfbf6ad6d2a79267c221b | refs/heads/master | 2023-02-27T09:57:25.692878 | 2021-01-27T14:20:57 | 2021-01-27T14:20:57 | 297,217,521 | 0 | 0 | null | 2020-09-21T03:23:09 | 2020-09-21T03:23:09 | null | UTF-8 | Python | false | false | 2,459 | py | import pytracking.vot as vot
import sys
import cv2
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from pytracking.tracker.segm_sk3x3 import SegmSK3x3
from pytracking.parameter.segm_sk3x3 import default_params_ep as vot_params
def rect_to_poly(rect):
    """Convert an axis-aligned [x, y, w, h] rectangle to a flat polygon.

    Corners are listed clockwise from the top-left:
    [x0, y0, x1, y1, x2, y2, x3, y3].
    """
    left, top, width, height = rect
    right = left + width
    bottom = top + height
    return [left, top, right, top, right, bottom, left, bottom]
def parse_sequence_name(image_path):
    """Return (sequence_name, index_of_'/color/') parsed from *image_path*.

    The sequence name is the path component immediately before '/color/';
    the slicing arithmetic (rather than rsplit) is kept so a path with no
    slash before '/color/' yields an empty name, exactly as before.
    """
    marker = image_path.find('/color/')
    prefix = image_path[:marker]
    offset = prefix[::-1].find('/')
    return prefix[marker - offset:], marker
def parse_frame_name(image_path, idx):
    """Return the frame name (basename without extension) after '/color/'.

    *idx* is the position of '/color/' as returned by parse_sequence_name.
    """
    tail = image_path[idx + len('/color/'):]
    dot = tail.find('.')
    return tail[:dot]
# MAIN
handle = vot.VOT("polygon")
selection = handle.region()
imagefile = handle.frame()
if not imagefile:
    sys.exit(0)
# Load parameters for the epoch-31 checkpoint.
params = vot_params.parameters(31)
# Flatten the initial polygon into [x0, y0, ..., x3, y3], rounded to 2 dp.
gt_rect = [round(selection.points[0].x, 2), round(selection.points[0].y, 2),
           round(selection.points[1].x, 2), round(selection.points[1].y, 2),
           round(selection.points[2].x, 2), round(selection.points[2].y, 2),
           round(selection.points[3].x, 2), round(selection.points[3].y, 2)]
# OpenCV loads BGR; the tracker expects RGB.
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
sequence_name, idx_ = parse_sequence_name(imagefile)
frame_name = parse_frame_name(imagefile, idx_)
params.masks_save_path = ''
params.save_mask = False
tracker = SegmSK3x3(params)
# tell the sequence name to the tracker (to save segmentation masks to the disk)
tracker.sequence_name = sequence_name
tracker.frame_name = frame_name
tracker.initialize(image, gt_rect)
while True:
    imagefile = handle.frame()
    if not imagefile:
        break
    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
    # tell the frame name to the tracker (to save segmentation masks to the disk)
    frame_name = parse_frame_name(imagefile, idx_)
    tracker.frame_name = frame_name
    prediction = tracker.track(image)
    # The tracker may return an axis-aligned rect; convert it to a polygon.
    if len(prediction) == 4:
        prediction = rect_to_poly(prediction)
    pred_poly = vot.Polygon([vot.Point(prediction[0], prediction[1]),
                             vot.Point(prediction[2], prediction[3]),
                             vot.Point(prediction[4], prediction[5]),
                             vot.Point(prediction[6], prediction[7])])
    handle.report(pred_poly)
| [
"752958525@qq.com"
] | 752958525@qq.com |
3acba025f2c13a9f0caf50de16baee79e95de19e | 18ad97292b34a679b8dea8a85090541c5bbf6174 | /averageseasy.py | 97cf78596df80adb1ddda5916d7075d7163cfa81 | [] | no_license | Jyotirm0y/kattis | b941044e39dc36d169450480fc33fd33bd2e0f8e | 2b9c1819ba29419bbea3db2e8ad7851155abbb3a | refs/heads/master | 2023-05-31T21:11:38.350044 | 2021-06-12T08:21:47 | 2021-06-12T08:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | t = int(input())
for _ in range(t):
input()
ncs, ne = map(int, input().split())
iqcs = list(map(int, input().split()))
iqe = list(map(int, input().split()))
sumiqcs = sum(iqcs)
sumiqe = sum(iqe)
print(sum([1 if iq*ne > sumiqe and iq*ncs < sumiqcs else 0 for iq in iqcs]))
| [
"ainunnajib@gmail.com"
] | ainunnajib@gmail.com |
7cd61cc5a2265dd40f86d8fb7e1a9c2e8cd16a39 | cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde | /B/BuddyStrings.py | fe5f524ac1581380e3ab3d95645abc58736301d5 | [] | no_license | bssrdf/pyleet | 8861bbac06dfe0f0f06f6ad1010d99f8def19b27 | 810575368ecffa97677bdb51744d1f716140bbb1 | refs/heads/master | 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | '''
Given two strings A and B of lowercase letters, return true if you can swap two letters in
A so the result is equal to B, otherwise, return false.
Swapping letters is defined as taking two indices i and j (0-indexed) such that i != j and
swapping the characters at A[i] and A[j]. For example, swapping at indices 0 and 2 in
"abcd" results in "cbad".
Example 1:
Input: A = "ab", B = "ba"
Output: true
Explanation: You can swap A[0] = 'a' and A[1] = 'b' to get "ba", which is equal to B.
Example 2:
Input: A = "ab", B = "ab"
Output: false
Explanation: The only letters you can swap are A[0] = 'a' and A[1] = 'b', which results
in "ba" != B.
Example 3:
Input: A = "aa", B = "aa"
Output: true
Explanation: You can swap A[0] = 'a' and A[1] = 'a' to get "aa", which is equal to B.
Example 4:
Input: A = "aaaaaaabc", B = "aaaaaaacb"
Output: true
Example 5:
Input: A = "", B = "aa"
Output: false
Constraints:
0 <= A.length <= 20000
0 <= B.length <= 20000
A and B consist of lowercase letters.
'''
class Solution(object):
    def buddyStrings(self, A, B):
        """Return True iff swapping exactly two characters of A makes it equal B.

        :type A: str
        :type B: str
        :rtype: bool
        """
        if len(A) != len(B):
            return False
        if A == B:
            # The swap must change nothing, so some character must repeat.
            return len(set(A)) < len(A)
        mismatches = [i for i, (a, b) in enumerate(zip(A, B)) if a != b]
        if len(mismatches) != 2:
            return False
        first, second = mismatches
        # The two mismatched positions must be each other's characters.
        return A[first] == B[second] and A[second] == B[first]
if __name__ == "__main__":
    # Smoke test: "ab" -> "ba" via one swap, so this should print True.
    print(Solution().buddyStrings("ab", "ba"))
"merlintiger@hotmail.com"
] | merlintiger@hotmail.com |
df4409470fe736ddda0aa22479628205853deac1 | ef09e86b16f741d0f262f330fc205e493b9d9041 | /polls/migrations/0001_initial.py | d6c669eba075ac5f2bcb3e33a82f245077a8f69f | [] | no_license | sarthakbhooshan/my_first_django_app | 6813d100a90dbe556732406a5d32691c7578b9c5 | b07a50a44fb8126fedfad874d81f0cb5f287a9c1 | refs/heads/master | 2021-01-14T08:38:45.499995 | 2016-07-18T11:18:33 | 2016-07-18T11:18:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-12 05:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the polls app (Question, Choice).
    Do not edit by hand; create a new migration instead."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
267b81539f58299286c9d273a7fa0e636ada96e9 | b0bd3342c244ebf30ae5ab29daa078f2b39010f7 | /utils.py | a035fb3e56880250c7e067c38377ce533c431ec5 | [] | no_license | naiqili/itime_learning | 30a8af7f1234277162ccdd4c69cd9f9a4a7ab412 | d9b191bb32a7e49cb99443d7dccea5bb392aee90 | refs/heads/master | 2021-06-19T04:54:06.239320 | 2017-06-26T13:35:39 | 2017-06-26T13:35:39 | 92,792,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,946 | py | import csv
import operator
import os
import numpy as np
import tensorflow as tf
from os import path
# Dataset and project locations.
DATA_DIR_MOVIELENS_100K = 'data/ml-100k'
HOME_DIR = path.expanduser('~')
PROJECTS_DIR = path.join(HOME_DIR, 'Projects')
RANKSYS_DIR = path.join(PROJECTS_DIR, 'RankSys')
RANKSYS_EXAMPLES_DIR = path.join(RANKSYS_DIR, 'RankSys-examples')
# Cross-validation / preprocessing settings.
NUM_FOLD = 5
NUM_GENRE = 18
THRESHOLD_ITEM = 50
THRESHOLD_USER = 20
VALID_PROPORTION = 0.05
# Canonical numeric dtypes used throughout (NumPy and TensorFlow must match).
NP_INT_DTYPE = np.int32
NP_FLOAT_DTYPE = np.float32
TF_INT_DTYPE = tf.int32
TF_FLOAT_DTYPE = tf.float32
class Dataset(object):
    """Pre-split rating dataset loaded from CSV files.

    Expects ``users.csv``/``items.csv`` (one line per entity) plus
    ``train{k}.csv``/``test{k}.csv``/``valid{k}.csv`` triples for each fold
    under *data_dir*.
    """
    def __init__(self, data_dir, num_fold=NUM_FOLD):
        user_filepath = path.join(data_dir, 'users.csv')
        item_filepath = path.join(data_dir, 'items.csv')
        self.num_fold = num_fold
        # Entity counts are inferred from line counts of the id files.
        num_user = count_num_line(user_filepath)
        num_item = count_num_line(item_filepath)
        print('#users={}\t#items={}'.format(num_user, num_item))
        self.num_user, self.num_item = num_user, num_item
        datasets = []
        for fold in range(num_fold):
            train_filepath = path.join(data_dir, 'train{0:1d}.csv'.format(fold))
            train_data = load_triple(train_filepath)
            test_filepath = path.join(data_dir, 'test{0:1d}.csv'.format(fold))
            test_data = load_triple(test_filepath)
            valid_filepath = path.join(data_dir, 'valid{0:1d}.csv'.format(fold))
            valid_data = load_triple(valid_filepath)
            dataset = train_data, test_data, valid_data
            datasets.append(dataset)
        self.datasets = datasets
    def get_dataset(self, fold):
        # Returns the (train, test, valid) triple for the given fold index.
        return self.datasets[fold]
def get_cv_index(data_size, num_fold):
    """Split *data_size* items into *num_fold* contiguous (start, end) ranges.

    The first ``data_size % num_fold`` folds receive one extra item so the
    whole range [0, data_size) is covered exactly.
    """
    quotient, remainder = divmod(data_size, num_fold)
    fold_sizes = [quotient + 1] * remainder + [quotient] * (num_fold - remainder)
    bounds = []
    start = 0
    for size in fold_sizes:
        end = start + size
        bounds.append((start, end))
        start = end
    return bounds
def count_num_line(filepath):
    """Return the number of lines in *filepath*.

    Streams the file and counts lines lazily instead of materializing the
    whole file in memory with ``readlines`` as the previous version did;
    the result is identical.
    """
    with open(filepath, 'r') as fin:
        return sum(1 for _ in fin)
def load_triple(filepath):
    """Load whitespace-separated (int, int, float) triples from *filepath*.

    Returns three parallel NumPy arrays (first column as NP_INT_DTYPE,
    second as NP_INT_DTYPE, third as NP_FLOAT_DTYPE).
    """
    col_a, col_b, col_c = [], [], []
    with open(filepath, 'r') as f:
        for line in f:
            tokens = line.split()
            col_a.append(int(tokens[0]))
            col_b.append(int(tokens[1]))
            col_c.append(float(tokens[2]))
    col_a = np.asarray(col_a, dtype=NP_INT_DTYPE)
    col_b = np.asarray(col_b, dtype=NP_INT_DTYPE)
    col_c = np.asarray(col_c, dtype=NP_FLOAT_DTYPE)
    return col_a, col_b, col_c
def np_build_indices(row_index, col_index):
    """Pair row and column index vectors into an (n, 2) index array."""
    rows = row_index[:, None]
    cols = col_index[:, None]
    return np.concatenate((rows, cols), axis=1)
def tf_build_indices(row_index, col_index):
    # TensorFlow counterpart of np_build_indices: pairs two index tensors
    # into an (n, 2) tensor via expand_dims + concat on axis 1.
    return tf.concat([tf.expand_dims(row_index, 1), tf.expand_dims(col_index, 1)], 1)
if __name__ == '__main__':
    # disable all debugging logs
    # (TF_CPP_MIN_LOG_LEVEL=3 silences TensorFlow's C++ INFO/WARNING/ERROR output)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
| [
"naiqil@student.unimelb.edu.au"
] | naiqil@student.unimelb.edu.au |
b6cd96dc4083b3617cc8be9fe9c662bdcef2d60f | 37eef4cd7e0e17086fb5cd3e0dd710b43470786a | /tests/commands/test__vi_cc.py | 0f3c95eca2e8808a7538606bfb81c41e3e96a738 | [
"MIT"
] | permissive | DylanBruzenak/Vintageous | 9ffd480aeea0e5c127fec7c9eafb8b5d3acf85c7 | 022faaf22acd72d3514c74013217b7661bf10a37 | refs/heads/master | 2021-01-15T11:34:53.529087 | 2013-08-17T21:02:07 | 2013-08-17T21:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | import unittest
from Vintageous.vi.constants import _MODE_INTERNAL_NORMAL
from Vintageous.vi.constants import MODE_NORMAL
from Vintageous.vi.constants import MODE_VISUAL
from Vintageous.vi.constants import MODE_VISUAL_LINE
from Vintageous.tests.commands import set_text
from Vintageous.tests.commands import add_selection
from Vintageous.tests.commands import get_sel
from Vintageous.tests.commands import first_sel
from Vintageous.tests.commands import BufferTest
class Test_vi_cc_InModeInternalNormal(BufferTest):
    """Tests for the Vim `cc` command (change whole line) in internal-normal mode."""
    def testSelectsWholeLine(self):
        # Caret in the middle of line 2; the motion should select the full line.
        set_text(self.view, ''.join(('foo bar\nfoo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 2), (1, 2)))
        self.view.run_command('_vi_cc_motion', {'mode': _MODE_INTERNAL_NORMAL, 'count': 1})
        self.assertEqual(self.R((1, 0), (1, 7)), first_sel(self.view))
    def testDeletesWholeLine(self):
        # With the full line selected, the action removes its content but
        # keeps the line itself (newline preserved).
        set_text(self.view, ''.join(('foo bar\nfoo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 0), (1, 7)))
        self.view.run_command('_vi_cc_action', {'mode': _MODE_INTERNAL_NORMAL})
        self.assertEqual(self.view.substr(self.R(0, self.view.size())), 'foo bar\n\nfoo bar\n')
    def testKeepsLeadingWhitespace(self):
        # `cc` preserves indentation: leading tab/spaces survive the change.
        set_text(self.view, ''.join(('foo bar\n\t  foo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 0), (1, 10)))
        self.view.run_command('_vi_cc_action', {'mode': _MODE_INTERNAL_NORMAL})
        self.assertEqual(self.view.substr(self.R(0, self.view.size())), 'foo bar\n\t  \nfoo bar\n')
    @unittest.skip("Implement")
    def testCanDeleteWithCount(self):
        self.assertTrue(False)
    @unittest.skip("Implement")
    def testDeletedLinesAreYanked(self):
        self.assertTrue(False)
| [
"guillermo.lopez@outlook.com"
] | guillermo.lopez@outlook.com |
33f784c238773201f20b7191cb092dc4657a942b | 90ac505fb14e4969cd4e7f164f8969ed2344d3e3 | /BYSL/ea.py | 9b6bb77bb599f20215c88cbaad443e730deffe10 | [] | no_license | rid47/python_basic_book | 4d08641ed802a80f5b5398c568231b366b1cf5d0 | f4a77577115b126094c9e5aac38a18bb42eeb28f | refs/heads/master | 2022-12-22T12:24:48.094483 | 2022-12-10T12:11:52 | 2022-12-10T12:11:52 | 234,990,760 | 0 | 1 | null | 2022-12-02T03:44:50 | 2020-01-20T00:45:53 | Tcl | UTF-8 | Python | false | false | 1,027 | py | class Car:
def __init__(self):
self.carFare = {'Hatchback': 30, 'Sedan': 50, 'SUV': 100}
def displayFareDetails(self):
print("Cost per day: ")
print("Hatchback: $", self.carFare['Hatchback'])
print("Sedan: $", self.carFare['Sedan'])
print("SUV: $", self.carFare['SUV'])
def calculateFare(self, typeOfCar, numberOfDays):
return self.carFare[typeOfCar] * numberOfDays
car = Car()
# Interactive menu loop: 1 = show fares, 2 = rent, 3 = exit.
while True:
    print("Enter 1 to display fare details")
    print("Enter 2 to rent a car")
    print("Enter 3 to exit")
    userChoice = int(input())
    if userChoice == 1:
        car.displayFareDetails()
    if userChoice == 2:
        print("Enter the type of car you would like to borrow")
        typeOfCar = input()
        print("Enter the number of dasy you would like to borrow the car")
        numberOfDays = int(input())
        fare = car.calculateFare(typeOfCar, numberOfDays)
        print("Total payable amount: $", fare)
    # NOTE(review): this elif chains off the `== 2` branch above, which
    # still behaves correctly for input 3 but reads oddly; an unknown car
    # type in branch 2 raises KeyError.
    elif userChoice == 3:
        quit()
| [
"ridwanmizan@gmail.com"
] | ridwanmizan@gmail.com |
9f60fa0febd594f00a2a621ba5012a8222fc7696 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2244/60690/275105.py | 24682629ee0ab61e98762255770bf7e57e5231fe | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | n=int(input())
def isHW(str):
    """Return True if the given string is a palindrome."""
    # Compare against the reversed string instead of checking pairs by index.
    return str == str[::-1]
def isSS(num):
    """Trial-division primality test.

    Only checks divisors up to sqrt(num) instead of num/2, which is
    asymptotically faster while returning exactly the same results as the
    original (including the quirks isSS(0) == isSS(1) == True and True for
    negative inputs, since the loop body never runs for them).
    """
    i = 2
    while i * i <= num:
        if num % i == 0:
            return False
        i += 1
    return True
# Advance n until it is both a palindrome and passes the primality test,
# then print the first such number >= the input.
while isHW(str(n))==False or isSS(n)==False: n+=1
print(n)
"1069583789@qq.com"
] | 1069583789@qq.com |
ba83a9d4e1b219536ebc3008d7ca7a7053a3910f | e645ebf3b5177eb0ebedb7f239bd6e1b40bf1b07 | /ups/boost_python.cfg | b171d4be2d21271f368e3b34eb327ea98a5d77b8 | [] | no_license | lsst-dm/bp | e095cdb7412124fef39bdd8428fce70bbf0f462a | 31c0b65866d06a09575a53d0dd558320e6994a06 | refs/heads/main | 2023-07-22T11:32:48.479329 | 2023-07-10T00:30:32 | 2023-07-10T00:30:32 | 37,212,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,961 | cfg | # -*- python -*-
"""
Dependencies and configuration for Boost.Python
"""
import os.path
import eups
def _get_root():
    """Return the root directory of the boost product as located by EUPS."""
    root = eups.productDir("boost")
    return root
# Dependency declaration consumed by the build tooling (see setup() below).
dependencies = {
    # Names of packages required to build against this package.
    "required": ["boost", "python"],
    # Names of packages optionally setup when building against this package.
    "optional": [],
    # Names of packages required to build this package, but not required to build against it.
    "buildRequired": [],
    # Names of packages optionally setup when building this package, but not used in building against it.
    "buildOptional": [],
}
def setup(conf, products, build=False):
    """
    Update an SCons environment to make use of the package.

    Arguments:
      conf ------ An SCons Configure context.  The SCons Environment conf.env should be updated
                  by the setup function.
      products -- A dictionary consisting of all dependencies and the return values of calls to their
                  setup() functions, or None if the dependency was optional and was not found.
      build ----- If True, this is the product currently being built, and products in "buildRequired" and
                  "buildOptional" dependencies will also be present in the products dict.
    """
    conf.env.PrependUnique(**paths)
    if not build:
        conf.env.AppendUnique(**doxygen)
    for target in libs:
        if target not in conf.env.libs:
            # Copy so later mutation of conf.env.libs cannot alter the
            # module-level defaults.  (Bug fix: this previously read from
            # the undefined name ``lib`` instead of the ``libs`` dict,
            # raising NameError the first time a new target was seen.)
            conf.env.libs[target] = libs[target].copy()
        else:
            # Merge without duplicating entries already present.
            for library in libs[target]:
                if library not in conf.env.libs[target]:
                    conf.env.libs[target].append(library)
    return {"paths": paths, "doxygen": doxygen, "libs": libs, "extra": {}}
###################################################################################################
# Variables for default implementation of setup() below; if the user provides
# a custom implementation of setup(), everything below is unnecessary.
# Packages to be added to the environment.
# NOTE: _get_root() is evaluated at import time, so the boost product must
# already be set up in EUPS when this module is loaded.
paths = {
    # Sequence of paths to add to the include path.
    "CPPPATH": [os.path.join(_get_root(), "include")],
    # Sequence of paths to add to the linker path.
    "LIBPATH": [os.path.join(_get_root(), "lib")],
}
doxygen = {
    # Sequence of Doxygen tag files produced by this product.
    "DOXYGEN_TAGFILES": [],
    # Sequence of Doxygen configuration files to include in dependent products.
    "DOXYGEN_INCLUDES": [],
}
# Libraries provided by the package, not including standard library prefixes or suffixes.
# Additional custom targets besides the standard "main", "python", and "test" targets may
# be provided as well.
libs = {
    # Normal libraries.
    "main": [],
    # Libraries only linked with C++-coded Python modules.
    "python": ["boost_python"],
    # Libraries only linked with C++-coded unit tests.
    "test": [],
}
| [
"jbosch@git.lsstcorp.org"
] | jbosch@git.lsstcorp.org |
9846c30b04f991029724e3c7761741398fd0acde | 9b7ef36988860750e3a6b704254ed2aaeb3a3dc7 | /insta/forms.py | b11f0faf52a0e23179021c45bebbb59b22c7d8d5 | [] | no_license | nicky-code/instagram | ed016aef3cabed46cdff3f1c8598fb9445ea12e5 | 6d9eb31cca33ed137b730fb23cd15ea7a8482faa | refs/heads/master | 2021-09-09T14:38:38.152140 | 2019-10-22T14:21:35 | 2019-10-22T14:21:35 | 215,626,513 | 0 | 0 | null | 2021-09-08T01:22:42 | 2019-10-16T19:20:08 | Python | UTF-8 | Python | false | false | 917 | py | from django import forms
from .models import Image,Profile,Comments
class ImageForm(forms.ModelForm):
    """ModelForm for creating/editing an Image; server-managed fields are excluded."""
    class Meta:
        model = Image
        exclude = ['image_name', 'likes','user','profile','comments']
        # widgets = {
        #     'tags': forms.CheckboxSelectMultiple(),
        # }
class ProfileForm(forms.ModelForm):
    """ModelForm for editing a Profile; ownership fields are excluded."""
    class Meta:
        model = Profile
        exclude = ['user', 'profile','user_id']
        # widgets = {
        #     'tags': forms.CheckboxSelectMultiple(),
        # }
class CommentForm(forms.ModelForm):
    """ModelForm for posting a Comment; author/target are set by the view."""
    class Meta:
        model = Comments
        exclude = ['user_profile','image_comment']
        # widgets = {
        #     'tags': forms.CheckboxSelectMultiple(),
        # }
class InstagramForm(forms.Form):
    """Plain (non-model) contact-style form collecting a name and an email."""
    your_name = forms.CharField(label='First Name',max_length=30)
    email = forms.EmailField(label='Email')
| [
"aline.nicole7@gmail.com"
] | aline.nicole7@gmail.com |
5a1874482abbdd857dd7f934e6aef889e1c11e38 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_commercialism.py | 9e09ea8f68ec64d83d9e926fdaf0cb983af5d158 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py |
#calss header
class _COMMERCIALISM():
def __init__(self,):
self.name = "COMMERCIALISM"
self.definitions = [u'the principles and activity of commerce, especially those connected with profit rather than quality or doing good']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
973bf4a1bc852efaa74a10f6ea3c1548fc8bd3da | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/common/Lib/plat-os2emx/IN.py | 4106b079428faa0be3a055320a5a82364e207dc1 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,138 | py | # 2016.11.19 20:01:08 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/plat-os2emx/IN.py
# NOTE(review): decompiled Python 2 module (uses `long` and `L` literals);
# it will not run under Python 3 as-is. `_swapl`/`_swaps` are expected to be
# provided elsewhere -- not defined in this module.
PAGE_SIZE = 4096
HZ = 100
MAXNAMLEN = 260
MAXPATHLEN = 260
# Host/network byte-order conversion helpers (delegate to external swaps).
def htonl(X):
    return _swapl(X)
def ntohl(X):
    return _swapl(X)
def htons(X):
    return _swaps(X)
def ntohs(X):
    return _swaps(X)
# IP protocol numbers.
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_EON = 80
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
# Classful IPv4 address predicates (class A/B/C/D by leading bits).
def IN_CLASSA(i):
    return long(i) & 2147483648L == 0
IN_CLASSA_NET = 4278190080L
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 16777215
IN_CLASSA_MAX = 128
def IN_CLASSB(i):
    return long(i) & 3221225472L == 2147483648L
IN_CLASSB_NET = 4294901760L
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 65535
IN_CLASSB_MAX = 65536
def IN_CLASSC(i):
    return long(i) & 3758096384L == 3221225472L
IN_CLASSC_NET = 4294967040L
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 255
def IN_CLASSD(i):
    return long(i) & 4026531840L == 3758096384L
IN_CLASSD_NET = 4026531840L
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 268435455
def IN_MULTICAST(i):
    return IN_CLASSD(i)
def IN_EXPERIMENTAL(i):
    return long(i) & 3758096384L == 3758096384L
def IN_BADCLASS(i):
    return long(i) & 4026531840L == 4026531840L
# Well-known addresses.
INADDR_ANY = 0
INADDR_LOOPBACK = 2130706433
INADDR_BROADCAST = 4294967295L
INADDR_NONE = 4294967295L
INADDR_UNSPEC_GROUP = 3758096384L
INADDR_ALLHOSTS_GROUP = 3758096385L
INADDR_MAX_LOCAL_GROUP = 3758096639L
IN_LOOPBACKNET = 127
# IP-level socket options.
IP_OPTIONS = 1
IP_MULTICAST_IF = 2
IP_MULTICAST_TTL = 3
IP_MULTICAST_LOOP = 4
IP_ADD_MEMBERSHIP = 5
IP_DROP_MEMBERSHIP = 6
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\Lib\plat-os2emx\IN.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 20:01:08 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
1f5eeb1362379c5c3b4038b981bfe90f79acab37 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/Ultra-Fast-Lane-Detection/scripts/convert_tusimple.py | 112ae4d113e7480f3b05e412d7499966a93165a3 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,498 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import tqdm
import numpy as np
import pdb
import json, argparse
def calc_k(line):
    """
    Calculate the direction of lanes
    """
    xs = line[::2]
    ys = line[1::2]
    # Euclidean distance between the lane's first and last points.
    span = np.sqrt((xs[0] - xs[-1]) ** 2 + (ys[0] - ys[-1]) ** 2)
    if span < 90:
        return -10  # if the lane is too short, it will be skipped
    # Slope of the best linear fit, converted to an angle in radians.
    slope = np.polyfit(xs, ys, deg=1)[0]
    return np.arctan(slope)
def draw(im, line, idx, show=False):
    '''
    Generate the segmentation label according to json annotation
    '''
    # line is a flat [x0, y0, x1, y1, ...] sequence of lane points.
    line_x = line[::2]
    line_y = line[1::2]
    pt0 = (int(line_x[0]), int(line_y[0]))
    if show:
        # Debug overlay: render the lane index near the middle of the lane.
        cv2.putText(im, str(idx), (int(line_x[len(line_x) // 2]), int(line_y[len(line_x) // 2]) - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
    # Encode the lane id in the gray value (lane 1 -> 60, 2 -> 120, ...).
    idx = idx * 60
    # Draw thick segments connecting consecutive points, mutating `im` in place.
    for i in range(len(line_x) - 1):
        cv2.line(im, pt0, (int(line_x[i + 1]), int(line_y[i + 1])), (idx,), thickness=16)
        pt0 = (int(line_x[i + 1]), int(line_y[i + 1]))
def get_tusimple_list(root, label_list):
    '''
    Get all the files' names from the json annotation
    '''
    # Concatenate the per-file JSON-lines annotations into one list.
    label_json_all = []
    for l in label_list:
        l = os.path.join(root, l)
        label_json = [json.loads(line) for line in open(l).readlines()]
        label_json_all += label_json
    names = [l['raw_file'] for l in label_json_all]
    h_samples = [np.array(l['h_samples']) for l in label_json_all]
    lanes = [np.array(l['lanes']) for l in label_json_all]
    line_txt = []
    for i in range(len(lanes)):
        line_txt_i = []
        for j in range(len(lanes[i])):
            # A lane whose x coordinates are all -2 has no valid points.
            if np.all(lanes[i][j] == -2):
                continue
            # -2 marks a missing sample; keep only the valid (x, y) pairs.
            valid = lanes[i][j] != -2
            # Interleave x and y as strings: [x0, y0, x1, y1, ...].
            line_txt_tmp = [None] * (len(h_samples[i][valid]) + len(lanes[i][j][valid]))
            line_txt_tmp[::2] = list(map(str, lanes[i][j][valid]))
            line_txt_tmp[1::2] = list(map(str, h_samples[i][valid]))
            line_txt_i.append(line_txt_tmp)
        line_txt.append(line_txt_i)
    return names, line_txt
def generate_segmentation_and_train_list(root, line_txt, names):
"""
The lane annotations of the Tusimple dataset is not strictly in order, so we need to find out the correct lane order for segmentation.
We use the same definition as CULane, in which the four lanes from left to right are represented as 1,2,3,4 in segentation label respectively.
"""
train_gt_fp = open(os.path.join(root, 'train_gt.txt'), 'w')
for i in tqdm.tqdm(range(len(line_txt))):
tmp_line = line_txt[i]
lines = []
for j in range(len(tmp_line)):
lines.append(list(map(float, tmp_line[j])))
ks = np.array([calc_k(line) for line in lines]) # get the direction of each lane
k_neg = ks[ks < 0].copy()
k_pos = ks[ks > 0].copy()
k_neg = k_neg[k_neg != -10] # -10 means the lane is too short and is discarded
k_pos = k_pos[k_pos != -10]
k_neg.sort()
k_pos.sort()
label_path = names[i][:-3] + 'png'
label = np.zeros((720, 1280), dtype=np.uint8)
bin_label = [0, 0, 0, 0]
if len(k_neg) == 1: # for only one lane in the left
which_lane = np.where(ks == k_neg[0])[0][0]
draw(label, lines[which_lane], 2)
bin_label[1] = 1
elif len(k_neg) == 2: # for two lanes in the left
which_lane = np.where(ks == k_neg[1])[0][0]
draw(label, lines[which_lane], 1)
which_lane = np.where(ks == k_neg[0])[0][0]
draw(label, lines[which_lane], 2)
bin_label[0] = 1
bin_label[1] = 1
elif len(k_neg) > 2: # for more than two lanes in the left,
which_lane = np.where(ks == k_neg[1])[0][0] # we only choose the two lanes that are closest to the center
draw(label, lines[which_lane], 1)
which_lane = np.where(ks == k_neg[0])[0][0]
draw(label, lines[which_lane], 2)
bin_label[0] = 1
bin_label[1] = 1
if len(k_pos) == 1: # For the lanes in the right, the same logical is adopted.
which_lane = np.where(ks == k_pos[0])[0][0]
draw(label, lines[which_lane], 3)
bin_label[2] = 1
elif len(k_pos) == 2:
which_lane = np.where(ks == k_pos[1])[0][0]
draw(label, lines[which_lane], 3)
which_lane = np.where(ks == k_pos[0])[0][0]
draw(label, lines[which_lane], 4)
bin_label[2] = 1
bin_label[3] = 1
elif len(k_pos) > 2:
which_lane = np.where(ks == k_pos[-1])[0][0]
draw(label, lines[which_lane], 3)
which_lane = np.where(ks == k_pos[-2])[0][0]
draw(label, lines[which_lane], 4)
bin_label[2] = 1
bin_label[3] = 1
cv2.imwrite(os.path.join(root, label_path), label)
train_gt_fp.write(names[i] + ' ' + label_path + ' ' + ' '.join(list(map(str, bin_label))) + '\n')
train_gt_fp.close()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--root', required=True, help='The root of the Tusimple dataset')
return parser
if __name__ == "__main__":
args = get_args().parse_args()
# training set
names, line_txt = get_tusimple_list(args.root,
['label_data_0601.json', 'label_data_0531.json', 'label_data_0313.json'])
# generate segmentation and training list for training
generate_segmentation_and_train_list(args.root, line_txt, names)
# testing set
names, line_txt = get_tusimple_list(args.root, ['test_tasks_0627.json'])
# generate testing set for testing
with open(os.path.join(args.root, 'test.txt'), 'w') as fp:
for name in names:
fp.write(name + '\n')
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
8deed70e29027f1dfb4b831b5f3bedafcc194e64 | 5ee5853eb335fcf575d4344366ef9b4bce03570d | /pr1658m/min_operations.py | 2008296fd086a4c60d9774021e9bc8ae9d23e3b3 | [
"MIT"
] | permissive | l33tdaima/l33tdaima | 15463fb2f8d61286a4a3a7bacaaee2ab1f7c4f43 | f35305c618b383a79d05074d891cf0f7acabd88f | refs/heads/main | 2023-07-20T21:52:26.330301 | 2023-07-19T02:30:22 | 2023-07-19T02:30:22 | 99,509,451 | 1 | 0 | MIT | 2018-10-31T15:10:49 | 2017-08-06T19:44:29 | JavaScript | UTF-8 | Python | false | false | 772 | py | from typing import List
class Solution:
def minOperations(self, nums: List[int], x: int) -> int:
target = sum(nums) - x
if target == 0:
return len(nums)
ans, s, smap = 0, 0, {0: -1}
for i, n in enumerate(nums):
s += n
if s - target in smap:
ans = max(ans, i - smap[s - target])
smap[s] = i
return len(nums) - ans if ans else -1
# TESTS
for nums, x, expected in [
([1, 1, 4, 2, 3], 5, 2),
([5, 6, 7, 8, 9], 4, -1),
([3, 2, 20, 1, 1, 3], 10, 5),
([4, 2, 1, 3], 10, 4),
]:
sol = Solution()
actual = sol.minOperations(nums, x)
print("The minimum operations in", nums, "to reduce", x, "to zero ->", actual)
assert actual == expected
| [
"l33tdaima@github.com"
] | l33tdaima@github.com |
b03961fffa86ad304863eef3bced898e77c688c3 | c55aedc3479a4d311fb406d8133b0e0ceb99d2df | /example/kdtree_0_base/kdtree_3_mesh_color.py | 7304ed067ce6dd6971b19b2aab7ca20736696a5c | [] | no_license | tarwcz111111111/DashCam_python | 4a33cdb3e5a8368b81ddc7c0596d4f0802b7c9d6 | 6e025ff49261c146205eb56bbbf4175f1d413f54 | refs/heads/master | 2020-08-25T04:55:16.695561 | 2017-08-28T04:34:59 | 2017-08-28T04:34:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,067 | py | #!/usr/bin/python3
# ==============================================================
# Pack the matrix process into google parse
# ==============================================================
import numpy as np
import triangle
from glumpy import glm
import sys
sys.path.append('/home/andy/Documents/gitHub/DashCam_python/module') # use the module under 'module'
import file_process
import google_parse
import glumpy_setting
import base_process
# Create dashCamFileProcess and load 50 top Dashcam
dashCamFileProcess = file_process.DashCamFileProcessor()
# Manual anchor, but I think this is so wrong.
#anchor = {'panoId': 'uSjqj9Lt256V8I7RckMykA', 'Lat': 25.068939, 'Lon': 121.479781}
anchor = {'panoId': 'JfAAg1RD0myOqNIU0utdNA', 'Lat': 22.622543, 'Lon': 120.285735}
"""
For Visual
"""
sleIndex = 6
for fileIndex in range(sleIndex,sleIndex+1):
fileID = str(dashCamFileProcess.list50[fileIndex][1])
print(fileID, fileIndex)
fileID += '_info3d'
"""
Create the global metric point cloud,
then set the region anchor
"""
sv3DRegion = google_parse.StreetView3DRegion(fileID)
sv3DRegion.init_region(anchor=None)
anchor_matrix_whole = sv3DRegion.anchorMatrix
index = 0
for sv3D_id, sv3D in sorted(sv3DRegion.sv3D_Dict.items()):
### WHY???
sv3D.create_ptcloud_ground_grid()
sv3D.apply_global_adjustment()
sv3D.apply_local_adjustment()
if index == 0:
data = sv3D.ptCLoudData
data_gnd = sv3D.ptCLoudDataGnd
data_gnd_grid = sv3D.ptCLoudDataGndGrid
else:
data = np.concatenate((data, sv3D.ptCLoudData), axis=0)
data_gnd = np.concatenate((data_gnd, sv3D.ptCLoudDataGnd), axis=0)
data_gnd_grid = np.concatenate((data_gnd_grid, sv3D.ptCLoudDataGndGrid), axis=0)
index += 1
if index > 0:
break
#break
gpyWindow = glumpy_setting.GpyWindow()
#programSV3DRegion = glumpy_setting.ProgramSV3DRegion(data=data, name=None, point_size=1, anchor_matrix=anchor_matrix_whole)
#programSV3DRegion.apply_anchor()
#gpyWindow.add_program(programSV3DRegion)
programSV3DRegion = glumpy_setting.ProgramSV3DRegion(data=data_gnd, name=None, point_size=1, anchor_matrix=anchor_matrix_whole)
programSV3DRegion.apply_anchor()
gpyWindow.add_program(programSV3DRegion)
#programSV3DRegion = glumpy_setting.ProgramSV3DRegion(data=data_gnd_grid, name=None, point_size=1, anchor_matrix=anchor_matrix_whole, alpha=0)
#programSV3DRegion.apply_anchor()
#gpyWindow.add_program(programSV3DRegion)
"""
Triangle
"""
tri = np.array(triangle.delaunay(data_gnd['a_position'][:, 0:2]), dtype=np.uint32)
#data_gnd_grid['a_position'] = base_process.sv3d_apply_m4(data=data_gnd_grid['a_position'], m4=np.linalg.inv(anchor_matrix_whole))
#data_gnd['a_position'][:, 2] = 0
programGround = glumpy_setting.ProgramPlane(data=data_gnd, name=str(index), face=tri)
gpyWindow.add_program(programGround)
#programAxis = glumpy_setting.ProgramAxis(line_length=5)
#gpyWindow.add_program(programAxis)
gpyWindow.run()
| [
"ydnaandy123@gmail.com"
] | ydnaandy123@gmail.com |
80149282aabb59543236536d133ab52397c545e0 | 672809bd026d006e785f87c72995a2f368702d63 | /site_main/matcher/matcher.py | 439dbfb48a1e1430f417dd3e2fda9da149d0dabc | [] | no_license | kz26/uchicagolunch | 34c391688897dc88edc78ccc771805c2f76d64d5 | f1b0415856e7a62a8ca12ea824af3483a80c876d | refs/heads/master | 2016-09-06T17:35:21.703308 | 2012-03-23T23:00:36 | 2012-03-23T23:00:36 | 2,806,713 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,249 | py | from site_main.models import *
from site_main.emails import *
from datetime import datetime, timedelta, date, time
import edmonds
from random import choice
# find and process matches
# returns a list of newly created Match objects
def run():
# retrieve a list of unfilled requests
def getRequests():
reqs = Request.objects.filter(expires__gt=datetime.now(), matched=False, active=True)
rl = {}
for r in reqs:
r_id = r.pk
r_dates = set(r.day_set.all().values_list('date', flat=True))
r_prefs = set(r.restaurant_prefs.all().values_list('pk', flat=True))
rl[r_id] = (r_dates, r_prefs)
return rl
# create a graph from the above
# dictionary key: request ID
# dictionary value: compatible request IDs
def createGraph(rl):
g = {}
for k1, v1 in rl.iteritems():
if k1 not in g:
g[k1] = []
for k2, v2 in rl.iteritems():
if k2 == k1: continue
if not v2[0].isdisjoint(v1[0]) and not v2[1].isdisjoint(v1[1]):
g[k1].append(k2)
return g
# runs Edmond's matching algorithm on the input graph
def findMatches(g):
return edmonds.matching(g)
reqs = getRequests()
g = createGraph(reqs)
matches = findMatches(g)
matched = []
results = []
for k, v in matches.iteritems():
if k in matched or v in matched: continue
req1 = reqs[k]
req2 = reqs[v]
suggested_day = choice(tuple(req1[0].intersection(req2[0])))
suggested_date = datetime.combine(suggested_day, time(12)) + timedelta(minutes=choice(range(0, 135, 15)))
suggested_rc = choice(tuple(req1[1].intersection(req2[1])))
suggested_rest = choice(list(Restaurant.objects.filter(category__pk=suggested_rc)))
reqo1 = Request.objects.get(pk=k)
reqo2 = Request.objects.get(pk=v)
mo = Match.objects.create(request1=reqo1, request2=reqo2, location=suggested_rest, date=suggested_date)
notify_match(mo)
for r in (reqo1, reqo2):
r.matched = True
r.save()
matched.extend((k, v))
results.append(mo)
return results
| [
"whitehat2k9@gmail.com"
] | whitehat2k9@gmail.com |
0e036c343fc1a1037156ec9e3dc7c44563c81dbf | b23bb2c9c98909c53e779e762c359fdb7b0cf412 | /tests/unit/raml/tests/test_traits.py | b6eeb58116958ff7ca17198f63b8cd6eedbd49c1 | [
"MIT"
] | permissive | mpetyx/pyapi | 4902e97340e2597fcfe52968dc6902a96d9a3448 | 1c8c5b392e8a943ebff0864b129defdbf21570f2 | refs/heads/master | 2021-01-06T20:37:33.974145 | 2015-05-26T10:06:41 | 2015-05-26T10:06:42 | 29,341,456 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,330 | py | __author__ = 'ad'
import os.path
from pyapi.libraries.pyraml_parser_master import pyraml
from pyapi.libraries.pyraml_parser_master.pyraml import parser
from pyapi.libraries.pyraml_parser_master.pyraml.entities import RamlRoot, RamlTrait, RamlBody, RamlResourceType
fixtures_dir = os.path.join(os.path.dirname(__file__), '../', 'samples')
def test_parse_traits_with_schema():
p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/media-type.yaml'))
assert isinstance(p, RamlRoot), RamlRoot
assert p.traits, "Property `traits` should be set"
assert len(p.traits) == 1, p.traits
assert isinstance(p.traits["traitOne"], RamlTrait), p.traits
assert isinstance(p.traits["traitOne"].body, RamlBody), p.traits["traitOne"]
assert p.traits["traitOne"].body.schema == """{ "$schema": "http://json-schema.org/draft-03/schema",
"type": "object",
"description": "A product presentation",
"properties": {
"id": { "type": "string" },
"title": { "type": "string" }
}
}
""", p.traits["traitOne"].body.schema
def test_parse_raml_with_many_traits():
p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/full-config.yaml'))
assert isinstance(p, RamlRoot), RamlRoot
assert p.traits, "Property `traits` should be set"
assert len(p.traits) == 2, p.traits
assert isinstance(p.traits["simple"], RamlTrait), p.traits
assert isinstance(p.traits["knotty"], RamlTrait), p.traits
assert p.traits["simple"].displayName == "simple trait"
assert p.traits["knotty"].displayName == "<<value>> trait"
def test_parse_resource_type_with_references_to_traits():
p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/media-type.yaml'))
assert isinstance(p, RamlRoot), RamlRoot
assert p.resourceTypes, "Property `traits` should be set"
assert len(p.resourceTypes)
assert 'typeParent' in p.resourceTypes, p.resourceTypes
assert isinstance(p.resourceTypes['typeParent'], RamlResourceType), p.resourceTypes
parent_resource_type = p.resourceTypes['typeParent']
assert parent_resource_type.methods, p.resourceTypes['typeParent']
assert 'get' in parent_resource_type.methods
assert 'typeChild' in p.resourceTypes, p.resourceTypes
assert isinstance(p.resourceTypes['typeChild'], RamlResourceType), p.resourceTypes | [
"mpetyx@gmail.com"
] | mpetyx@gmail.com |
7ee2fd4d8932d9ae1e9f9b0a3189f6b31cfc3a56 | e87532daceef2e6d0db72238d647c5bde0993198 | /apps/market/urls.py | ab5af579c5daee08bd75cf7d23a5bc60b9ba297d | [] | no_license | brijmohan/zamboni | cddfd07078c3eae902785d007c1f1e94b581c269 | 57eca56bfeae4f28547856d64284d10970905809 | refs/heads/master | 2020-12-25T03:11:58.888828 | 2011-10-24T09:25:06 | 2011-10-25T18:26:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | from django.conf.urls.defaults import patterns, url
from addons.urls import ADDON_ID
from market import views
urlpatterns = patterns('',
url(r'^verify/%s$' % ADDON_ID, views.verify_receipt,
name='api.market.verify'),
)
| [
"amckay@mozilla.com"
] | amckay@mozilla.com |
8230cf200ed8c3a204e1cdb5def5c66e6fbfd784 | f7aa97fe19b431523f35dc5badc9e8ff919ffa00 | /fss17/project/tools/axe/libWhere.py | 2731db6b521497034bcb603c18bfc22c6d81453a | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | rahlk/fss17 | 3b331427d450c5bb46b71b4aa5c77c59a8ec0a70 | 49e22c4ad01ff751f24c3e5702b7fa36a3a18e96 | refs/heads/master | 2021-01-19T18:03:13.364689 | 2017-12-12T12:51:28 | 2017-12-12T12:51:28 | 101,105,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,919 | py | """
# Support Code
## Standard Headers
"""
from __future__ import division, print_function
import random
import sys
sys.dont_write_bytecode = True
from settingsWhere import *
"""
## Simple, low-level stuff
### Maths Stuff
"""
def gt(x, y): return x > y
def lt(x, y): return x < y
def medianIQR(lst, ordered=False):
if not ordered:
lst = sorted(lst)
n = len(lst)
q = n // 4
iqr = lst[q * 3] - lst[q]
if n % 2:
return lst[q * 2], iqr
else:
p = max(0, q - 1)
return (lst[p] + lst[q]) * 0.5, iqr
def median(lst, ordered=False):
return medianIQR(lst, ordered)[0]
"""
An accumulator for reporting on numbers.
"""
class N():
"Add/delete counts of numbers."
def __init__(i, inits=[]):
i.zero()
map(i.__iadd__, inits)
def zero(i):
i.n = i.mu = i.m2 = 0
i.cache = Cache()
def sd(i):
if i.n < 2:
return 0
else:
return (max(0, i.m2) / (i.n - 1)) ** 0.5
def __iadd__(i, x):
i.cache += x
i.n += 1
delta = x - i.mu
i.mu += delta / (1.0 * i.n)
i.m2 += delta * (x - i.mu)
return i
def __isub__(i, x):
i.cache = Cache()
if i.n < 2: return i.zero()
i.n -= 1
delta = x - i.mu
i.mu -= delta / (1.0 * i.n)
i.m2 -= delta * (x - i.mu)
return i
class Cache:
"Keep a random sample of stuff seen so far."
def __init__(i, inits=[]):
i.all, i.n, i._has = [], 0, None
map(i.__iadd__, inits)
def __iadd__(i, x):
i.n += 1
if len(i.all) < 128: # if not full
i._has = None
i.all += [x] # then add
else: # otherwise, maybe replace an old item
if random.random() <= The.cache.size / i.n:
i._has = None
i.all[int(random.random() * The.cache.size)] = x
return i
def has(i):
if i._has == None:
lst = sorted(i.all)
med, iqr = medianIQR(i.all, ordered=True)
i._has = o(
median=med, iqr=iqr,
lo=i.all[0], hi=i.all[-1])
return i._has
"""
### Random stuff.
"""
by = lambda x: random.uniform(0, x)
rseed = random.seed
any = random.choice
rand = random.random
def seed(r=None):
global The
if The is None: The = defaults()
if r is None: r = The.seed
rseed(r)
"""
### List Handling Tricks
"""
def first(lst): return lst[0]
def second(lst): return lst[1]
def third(lst): return lst[2]
"""
### Printing Stuff
Print without newline:
"""
def say(*lst): print(*lst, end="")
"""
Print a list of numbers without an excess of decimal places:
"""
def gs(lst): return [g(x) for x in lst]
def g(x):
txt = '%g' % x
return int(txt) if int(x) == x else float(txt)
"""
Pretty print a dictionary:
"""
def showd(d):
def one(k, v):
if isinstance(v, list):
v = gs(v)
if isinstance(v, float):
return ":%s %g" % (k, v)
return ":%s %s" % (k, v)
return ' '.join([one(k, v) for k, v in
sorted(d.items())
if not "_" in k])
"""
## Decorator to run code at Start-up
"""
def go(f):
"A decorator that runs code at load time."
print("\n# ---|", f.__name__, "|-----------------")
if f.__doc__: print("#", f.__doc__)
f()
"""
## Handling command line options.
Convert command line to a function call.
e.g. if the file lib.py ends with
if __name__ == '__main__':eval(todo())
then
python lib.py myfun :a 1 :b fred
results in a call to _myfun(a=1,b='fred')_.
"""
def todo(com="print(The._logo,'WHERE (2.0) you at?')"):
import sys
if len(sys.argv) < 2: return com
def strp(x):
return isinstance(x, basestring)
def wrap(x):
return "'%s'" % x if strp(x) else str(x)
def oneTwo(lst):
while lst: yield lst.pop(0), lst.pop(0)
def value(x):
try:
return eval(x)
except:
return x
def two(x, y):
return x[1:] + "=" + wrap(value(y))
twos = [two(x, y) for x, y in oneTwo(sys.argv[2:])]
return sys.argv[1] + '(**dict(' + ','.join(twos) + '))'
"""
## More interesting, low-level stuff
"""
def timing(f, repeats=10):
"How long does 'f' take to run?"
import time
time1 = time.clock()
for _ in range(repeats):
f()
return (time.clock() - time1) * 1.0 / repeats
"""
## data.dat Completion Tool
Fills in some details on a table of data.dat. For example,
def nasa93():
vl=1;l=2;n=3;h=4;vh=5;xh=6
return data.dat(indep= [
'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data.dat', 'cplx', 'ruse',
'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex',
'ltex', 'tool', 'site', 'sced', 'kloc'],
less = ['effort', 'defects', 'months'],
_rows=[
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,25.9,117.6,808,15.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,24.6,117.6,767,15.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,7.7,31.2,240,10.1],
...
Adds in information on _cols_, _decisions_, _hi,lo_, etc:
{ :cols [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 22, 23, 24]
:decisions [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22]
:eval <function <lambda> at 0x7f3f825bea28>
:hi {0: 4, 1: 4, 2: 4, 3: 5, 4: 4, 5: 5, 6: 5,
7: 6, 8: 3, 9: 3, 10: 6, 11: 6, 12: 4, 13: 5,
14: 5, 15: 3, 16: 5, 17: 4, 18: 4, 19: 4,
20: 3, 21: 3, 22: 980, 23: 8211, 24: 50961}
:lo {0: 4, 1: 4, 2: 4, 3: 5, 4: 2, 5: 2, 6: 2,
7: 2, 8: 3, 9: 3, 10: 3, 11: 3, 12: 2,
13: 3, 14: 3, 15: 3, 16: 2, 17: 1, 18: 1,
19: 3, 20: 3, 21: 2, 22: 0.9, 23: 8.4, 24: 28}
:names ['Prec', 'Flex', 'Resl', 'Team', 'Pmat',
'rely', 'data.dat', 'cplx', 'ruse', 'docu',
'time', 'stor', 'pvol', 'acap', 'pcap',
'pcon', 'aexp', 'plex', 'ltex', 'tool',
'site', 'sced', 'kloc', 'effort',
'defects', 'months']
:objectives [22, 23, 24]
:w {0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1,
7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 1, 13: 1,
14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1,
20: 1, 21: 1, 22: -1, 23: -1, 24: -1}
}
Code:
"""
def data(indep=[], less=[], more=[], _rows=[]):
nindep = len(indep)
ndep = len(less) + len(more)
m = o(lo={}, hi={}, w={},
eval=lambda m, it: True,
_rows=[o(cells=r, score=0, scored=False,
x0=None, y0=None)
for r in _rows],
names=indep + less + more)
m.decisions = [x for x in range(nindep)]
m.objectives = [nindep + x - 1 for x in range(ndep)]
m.cols = m.decisions + m.objectives
for x in m.decisions:
m.w[x] = 1
for y, _ in enumerate(less):
m.w[x + y] = -1
for z, _ in enumerate(more):
m.w[x + y + z] = 1
for x in m.cols:
all = sorted(row.cells[x] for row in m._rows)
m.lo[x] = all[0]
m.hi[x] = all[-1]
return m
"""
## Start-up Actions
"""
if __name__ == '__main__': eval(todo())
| [
"i.m.ralk@gmail.com"
] | i.m.ralk@gmail.com |
9d92f42947a9a168d3bebbdd5e5d06464b004d38 | b87f66b13293782321e20c39aebc05defd8d4b48 | /maps/build/mayavi/enthought/tvtk/tests/test_class_tree.py | 963470a3f830e41f91e935de287c3b563a80c44f | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,944 | py | # Author: Prabhu Ramachandran
# License: BSD style
# Copyright (c) 2004, Enthought, Inc.
"""Tests class_tree.py. Uses the vtk module to test the code. Also
tests if the tree generation works for the __builtin__ module.
"""
import unittest
from enthought.tvtk import class_tree
import vtk
import __builtin__
# This computation can be expensive, so we cache it.
_cache = class_tree.ClassTree(vtk)
_cache.create()
def get_level(klass):
"""Gets the inheritance level of a given class."""
if not klass.__bases__:
return 0
else:
return max([get_level(b) for b in klass.__bases__]) + 1
class TestClassTree(unittest.TestCase):
def setUp(self):
self.t = _cache
def test_basic_vtk(self):
"""Basic tests for the VTK module."""
t = self.t
self.assertEqual(t.get_node('vtkObject').name, 'vtkObject')
self.assertEqual(t.get_node('vtkObject').parents[0].name,
'vtkObjectBase')
if (hasattr(vtk, 'vtkArrayCoordinates')
and issubclass(vtk.vtkArrayCoordinates, object)):
self.assertEqual(len(t.tree[0]), 2)
names = [x.name for x in t.tree[0]]
names.sort()
self.assertEqual(names, ['object', 'vtkObjectBase'])
else:
self.assertEqual(len(t.tree[0]), 1)
self.assertEqual(t.tree[0][0].name, 'vtkObjectBase')
def test_ancestors(self):
"""Check if get_ancestors is OK."""
# The parent child information is already tested so this test
# needs to ensure that the method works for a few known
# examples.
# Simple VTK test.
t = self.t
n = t.get_node('vtkDataArray')
x = vtk.vtkDataArray
ancestors = []
while x.__name__ != 'vtkObjectBase':
x = x.__bases__[0]
ancestors.append(x.__name__)
self.assertEqual([x.name for x in n.get_ancestors()], ancestors)
# Simple __builtin__ test.
t = class_tree.ClassTree(__builtin__)
t.create()
n = t.get_node('TabError')
bases = ['IndentationError', 'SyntaxError',
'StandardError', 'Exception']
if len(Exception.__bases__) > 0:
bases.extend(['BaseException', 'object'])
self.assertEqual([x.name for x in n.get_ancestors()],
bases)
def test_parent_child(self):
"""Check if the node's parent and children are correct."""
t = self.t
for node in t:
n_class = t.get_class(node.name)
base_names = [x.__name__ for x in n_class.__bases__]
base_names.sort()
parent_names = [x.name for x in node.parents]
parent_names.sort()
self.assertEqual(base_names, parent_names)
for c in node.children:
c_class = t.get_class(c.name)
base_names = [x.__name__ for x in c_class.__bases__]
self.assertEqual(node.name in base_names, True)
def test_level(self):
"""Check the node levels."""
t = self.t
for node in t:
self.assertEqual(get_level(t.get_class(node.name)), node.level)
def test_tree(self):
"""Check the tree structure."""
t = self.t
n = sum([len(x) for x in t.tree])
self.assertEqual(n, len(t.nodes))
for level, nodes in enumerate(t.tree):
for n in nodes:
self.assertEqual(n.level, level)
def test_builtin(self):
"""Check if tree structure for __builtin__ works."""
# This tests to see if the tree structure generation works for
# the __builtin__ module.
t = class_tree.ClassTree(__builtin__)
t.create()
self.t = t
self.test_parent_child()
self.test_level()
self.test_tree()
if __name__ == "__main__":
unittest.main()
| [
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
87bfa0465302e0f16e8e9caa8994ab7d156cc520 | 1eb960cec1c1bc891ea7cb9874b11182d753fabb | /news/migrations/0004_message_to_user.py | 197b9b145a6e878b07a38558f57a7fb1d0ef7532 | [] | no_license | squallcs12/kidnews-fbhack | 516f87160042389b9a9be1016d6a71dc95f97d13 | 154368a40b2042671b933a9ac53ca2e469266c84 | refs/heads/master | 2021-01-19T04:24:35.254491 | 2016-07-31T03:27:42 | 2016-07-31T03:27:42 | 64,523,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-30 11:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('news', '0003_emotion_useremotion'),
]
operations = [
migrations.AddField(
model_name='message',
name='to_user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='to_users', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| [
"daotranbang@gmail.com"
] | daotranbang@gmail.com |
49520f217fc253f8dd6e40a4f3b78353bec18c90 | 4a8c1f7d9935609b780aff95c886ef7781967be0 | /atcoder/_codeforces/1303_c.py | 4d8b66d7ea2f0f8e979a1a705aaa21fe7ae12ec1 | [] | no_license | recuraki/PythonJunkTest | d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a | 2556c973d468a6988d307ce85c5f2f8ab15e759a | refs/heads/master | 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,826 | py | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
q = int(input())
for _ in range(q):
s = input()
d = dict()
f = True
# STEP1: 入力文字列から隣り合う文字の関係を作る
for i in range(len(s)):
# 前後の文字を得る
nkey = s[i + 1] if i != len(s) - 1 else None
pkey = s[i - 1] if i != 0 else None
l = d.get(s[i], [])
# 前の文字がすでに隣接関係に登録されていないなら
if pkey is not None and pkey not in l:
# 文字を入れたいのにすでに両端が他の文字と指定されているなら不可能
if len(l) >= 2:
f = False
break
l.append(pkey)
d[s[i]] = l
l2 = d.get(pkey, [])
if s[i] not in l2:
if len(l2) >= 2:
f = False
break
l2.append(s[i])
d[pkey] = l2
if nkey is not None and nkey not in l:
# 文字を入れたいのにすでに両端が他の文字と指定されているなら不可能
if len(l) >= 2:
f = False
break
l.append(nkey)
d[s[i]] = l
l2 = d.get(nkey, [])
if s[i] not in l2:
# 文字を入れたいのにその左右がreserveされているなら
if len(l2) >= 2:
f = False
break
l2.append(s[i])
d[nkey] = l2
# STEP1終わり。
# codedocaという入力から以下の制約の辞書を作れる
# {'c': ['o', 'a'], 'o': ['c', 'd'], 'd': ['o', 'e'], 'e': ['d'], 'a': ['c']}
# print(d)
# STEP2 隣接するキーボードを作る
s = ""
# 上記で作った文字ごとに順番に処理を行う
for k in d.keys():
if s == "": # 空文字列の時
if len(d[k]) == 0:
s += k
elif len(d[k]) == 1:
s += k + d[k][0]
elif len(d[k]) == 2:
s += d[k][0] + k + d[k][1]
# その文字の位置がまだキーボードに存在しないなら
elif s.find(k) == -1:
ic1 = ic2 = -1
# ヒントとなる制約文字列の位置を探し
if len(d[k]) == 1:
ic1 = s.find(d[k][0])
elif len(d[k]) == 2:
ic1 = s.find(d[k][0])
ic2 = s.find(d[k][1])
# もし、2文字とも配置されているならその間に別の文字を挟まないといけないのでNG
if ic1 != -1 and ic2 != -1:
f = False
# ic1だけが配置されているなら
elif ic1 != -1:
# 先頭なら
if ic1 == 0:
s = k + s # この文字を先頭に加え
if len(d[k]) == 2: # さらにもう一文字未設置文字があるなら
s = d[k][1] + s # さらに先頭にその文字を加える
elif ic1 == len(s) - 1: # 一番後ろなら↑の逆を行う
s = s + k
if len(d[k]) == 2:
s = s + d[k][1]
elif ic2 != -1: # ic2探しの旅
if ic2 == 0: # 先頭がその文字なら
s = s[k][0] + k + s # その文字とさらに隣にいるべき文字を追加
elif ic2 == len(s) - 1: # 一番後ろなら
s = s + k + s[k][0] # 同じように
else: # その文字がすでに配置されているならば
ic = s.find(k)
if ic == 0: # その文字が先頭にあるなら
if len(d[k]) == 2 and s[1] == d[k][0]: # 先頭の隣が1個めの文字のとき
if d[k][1] in s: # 2つめの文字をおこうとする(もうあるなら置けないから失敗)
f = False
s = d[k][1] + s
elif len(d[k]) == 2 and s[1] == d[k][1]:# 先頭の隣が2個めの文字のとき
if d[k][0] in s:# 1つめの文字をおこうとする(もうあるなら置けないから失敗)
f = False
s = d[k][0] + s
elif len(d[k]) == 2: # 先頭の隣が1つめの文字でも2つめの文字でもないなら失敗
f = False
elif len(d[k]) == 1 and s[1] == d[k][0]: # 先頭の隣があるべき隣接文字の場合
pass #なにもしない(=もう配置されているんだから)
else: # それ以外の時、というのはあるべき文字でない場合なので失敗
f = False
elif ic == (len(s) - 1): # その文字が文末にあるなら
if len(d[k]) == 2 and s[len(s) - 2] == d[k][0]: # 隣が1個めの文字のとき
if d[k][1] in s:
f = False
s = s + d[k][1]
elif len(d[k]) == 2 and s[len(s) - 2] == d[k][1]: # 隣が2個めの文字のとき
if d[k][0] in s:
f = False
s = s + d[k][0]
elif len(d[k]) == 2: # 先頭の隣が1つめの文字でも2つめの文字でもないなら失敗
f = False
# 隣が1つめの文字でも2つめの文字でもないなら失敗
elif len(d[k]) == 1 and s[len(s) - 2] == d[k][0]:
pass # 正常
else:
f = False
pass
else: # そうでないなら真ん中に絶対あるので
if s[ic - 1] not in d[k] or s[ic + 1] not in d[k]: # 両端の文字が片方でも違うなら失敗
f = False
else: # この場合は両方がいずれかの文字なのでok
pass
# STEP2 終わり
# STEP3 他の文字で存在しないものをキーボードに足していく
if f:
list_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z']
for c in list_lower:
if c not in s:
s = s + c
print("YES")
print(s)
else:
print("NO")
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_input_1(self):
print("test_input_1")
input = """5
ababa
codedoca
abcda
zxzytyz
abcdefghijklmnopqrstuvwxyza"""
output = """YES
bacdefghijklmnopqrstuvwxyz
YES
edocabfghijklmnpqrstuvwxyz
NO
YES
xzytabcdefghijklmnopqrsuvw
NO"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | [
"glenda.kanai@gmail.com"
] | glenda.kanai@gmail.com |
0b3c6f4f9fd250ff776a7798baaea5f2b0d80fcc | 60b704673152dfa5130405ce2a318b710fc3b120 | /wrappers/arlexecute/simulation/testing_support.py | 58f5c5bf0e5ccc0c1fe13b471934c2d5a6963bbc | [
"Apache-2.0"
] | permissive | rstofi/algorithm-reference-library | 02b8e6735141fbbc1941cef2f36c8ed7ef2c3e38 | 03415e18ea55afc54eb9534dcd0ca2c7a4b0020a | refs/heads/master | 2020-04-05T20:08:40.043608 | 2019-11-22T09:16:48 | 2019-11-22T09:16:48 | 157,166,061 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | """
Functions that aid testing in various ways.
"""
from processing_components.simulation.testing_support import create_test_image
from processing_components.simulation.testing_support import create_test_image_from_s3
from processing_components.simulation.testing_support import create_low_test_image_from_gleam
from processing_components.simulation.testing_support import create_low_test_skymodel_from_gleam
from processing_components.simulation.testing_support import create_low_test_skycomponents_from_gleam
from processing_components.simulation.testing_support import create_test_skycomponents_from_s3
from processing_components.simulation.testing_support import replicate_image
from processing_components.simulation.testing_support import create_blockvisibility_iterator
from processing_components.simulation.testing_support import simulate_gaintable
from processing_components.simulation.testing_support import simulate_pointingtable
from processing_components.simulation.testing_support import simulate_pointingtable_from_timeseries
from processing_components.simulation.testing_support import ingest_unittest_visibility
from processing_components.simulation.testing_support import create_unittest_components
from processing_components.simulation.testing_support import create_unittest_model
from processing_components.simulation.testing_support import insert_unittest_errors | [
"realtimcornwell@gmail.com"
] | realtimcornwell@gmail.com |
474b5eb47ebc4029a4d155c60b3998250e41f214 | 81ec35443bc2567118aece66254c021e73f960d1 | /python3/10.8.dates_and_times.py | 5cdf7b5a8067e22dcf2ab6a0481b3b184e4d981b | [] | no_license | folkol/tutorials | 95f1d641843cc26c04a79f74270721c7de4ac628 | 962b0fd89dac244e7f9dcb03773a25d96413fb0b | refs/heads/master | 2023-08-17T18:50:18.358911 | 2023-08-02T20:46:53 | 2023-08-02T20:47:35 | 66,833,956 | 0 | 0 | null | 2023-09-05T03:40:46 | 2016-08-29T10:26:01 | JavaScript | UTF-8 | Python | false | false | 331 | py | from datetime import date
now = date.today()
print(now) # 2017-06-04
print(repr(now)) # datetime.date(2017, 6, 4)
s = now.strftime('%m-%d-%y. %d %b %Y is a %A on the %d say of %B')
print(s) # 06-04-17. 04 Jun 2017 is a Sunday on the 04 say of June
birthday = date(1980, 11, 2)
age = now - birthday
print(age.days) # 13363
| [
"mattias4@kth.se"
] | mattias4@kth.se |
ee00de44f4a031e1d7bf9de64e82c1f55cbf8028 | f22ca9aecda111a019502b462ce6772cb22d9425 | /test/test_cart_warehouse.py | 9147d67572c24863d56bc019a5bda0aaabd271d4 | [] | no_license | sivanv-unbxd/a2c-sdk-pim | cac05bc6335ddc3c4121d43e2dc476a6fec14965 | 51a07a0b7f90d74569ad14b47b174da7ac1fc374 | refs/heads/main | 2023-05-29T05:45:32.279821 | 2021-06-09T03:52:11 | 2021-06-09T03:52:11 | 375,218,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | # coding: utf-8
"""
Swagger API2Cart
API2Cart # noqa: E501
OpenAPI spec version: 1.1
Contact: contact@api2cart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.cart_warehouse import CartWarehouse # noqa: E501
from swagger_client.rest import ApiException
class TestCartWarehouse(unittest.TestCase):
"""CartWarehouse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCartWarehouse(self):
"""Test CartWarehouse"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.cart_warehouse.CartWarehouse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"sivanv@unbxd.com"
] | sivanv@unbxd.com |
014f19f93ff2542d81ce256e5af79c1fc0117b20 | 8981902427dc577228dfd5611c6afe86c3e2e9e2 | /dsmr_mqtt/services.py | 6a268e79b229fa528b8db0a29c46449e4d2c96f9 | [] | no_license | genie137/dsmr-reader | 5515f4f92bb05bcf00f0e8a0fbd1a018d408950b | 4d934b4838cb2de4a66ff193f4f3095e9beecd99 | refs/heads/master | 2020-03-21T18:14:05.182137 | 2018-06-12T14:54:55 | 2018-06-12T14:54:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,411 | py | import configparser
import logging
import json
from django.core import serializers
from django.utils import timezone
import paho.mqtt.publish as publish
from dsmr_mqtt.models.settings import broker, day_totals, telegram, meter_statistics
from dsmr_consumption.models.consumption import ElectricityConsumption
from dsmr_datalogger.models.statistics import MeterStatistics
import dsmr_consumption.services
logger = logging.getLogger('dsmrreader')
def get_broker_configuration():
""" Returns the broker configuration from the settings, in dict format, ready to use with paho.mqtt. """
broker_settings = broker.MQTTBrokerSettings.get_solo()
kwargs = {
'hostname': broker_settings.hostname,
'port': broker_settings.port,
'client_id': broker_settings.client_id,
'auth': None,
}
if broker_settings.username and broker_settings.password:
kwargs.update({
'auth': {
'username': broker_settings.username,
'password': broker_settings.password,
}
})
return kwargs
def publish_raw_dsmr_telegram(data):
""" Publishes a raw DSMR telegram string to a broker, if set and enabled. """
raw_settings = telegram.RawTelegramMQTTSettings.get_solo()
if not raw_settings.enabled:
return
broker_kwargs = get_broker_configuration()
try:
publish.single(topic=raw_settings.topic, payload=data, **broker_kwargs)
except ValueError as error:
logger.error('MQTT publish_raw_dsmr_telegram() | {}'.format(error))
def publish_json_dsmr_reading(reading):
""" Publishes a JSON formatted DSMR reading to a broker, if set and enabled. """
json_settings = telegram.JSONTelegramMQTTSettings.get_solo()
if not json_settings.enabled:
return
# User specified formatting.
config_parser = configparser.ConfigParser()
config_parser.read_string(json_settings.formatting)
json_mapping = config_parser['mapping']
json_dict = {}
# Copy all fields described in the mapping.
for k, v in reading.__dict__.items():
if k not in json_mapping:
continue
config_key = json_mapping[k]
json_dict[config_key] = v
json_reading = json.dumps(json_dict, cls=serializers.json.DjangoJSONEncoder)
broker_kwargs = get_broker_configuration()
try:
publish.single(topic=json_settings.topic, payload=json_reading, **broker_kwargs)
except ValueError as error:
logger.error('MQTT publish_json_dsmr_reading() | {}'.format(error))
def publish_split_topic_dsmr_reading(reading):
""" Publishes a DSMR reading to a broker, formatted in a separate topic per field name, if set and enabled. """
split_topic_settings = telegram.SplitTopicTelegramMQTTSettings.get_solo()
if not split_topic_settings.enabled:
return
# User specified formatting.
config_parser = configparser.ConfigParser()
config_parser.read_string(split_topic_settings.formatting)
topic_mapping = config_parser['mapping']
mqtt_messages = []
serialized_reading = json.loads(serializers.serialize('json', [reading]))
reading_fields = dict(serialized_reading[0]['fields'].items())
reading_fields['id'] = serialized_reading[0]['pk']
# Copy all fields described in the mapping.
for k, v in reading_fields.items():
if k not in topic_mapping:
continue
mqtt_messages.append({
'topic': topic_mapping[k],
'payload': v,
})
broker_kwargs = get_broker_configuration()
try:
publish.multiple(msgs=mqtt_messages, **broker_kwargs)
except ValueError as error:
logger.error('MQTT publish_split_topic_dsmr_reading() | {}'.format(error))
def publish_day_totals():
""" Publishes day totals to a broker, if set and enabled. """
json_settings = day_totals.JSONDayTotalsMQTTSettings.get_solo()
split_topic_settings = day_totals.SplitTopicDayTotalsMQTTSettings.get_solo()
if not json_settings.enabled and not split_topic_settings.enabled:
return
try:
latest_electricity = ElectricityConsumption.objects.all().order_by('-read_at')[0]
except IndexError:
# Don't even bother when no data available.
return
day_consumption = dsmr_consumption.services.day_consumption(
day=timezone.localtime(latest_electricity.read_at).date()
)
mqtt_messages = []
if json_settings.enabled:
mqtt_messages += day_totals_as_json(day_consumption, json_settings)
if split_topic_settings.enabled:
mqtt_messages += day_totals_per_topic(day_consumption, split_topic_settings)
broker_kwargs = get_broker_configuration()
try:
publish.multiple(msgs=mqtt_messages, **broker_kwargs)
except ValueError as error:
logger.error('MQTT publish_day_totals() | {}'.format(error))
def day_totals_as_json(day_consumption, json_settings):
""" Converts day consumption to JSON format. """
config_parser = configparser.ConfigParser()
config_parser.read_string(json_settings.formatting)
json_mapping = config_parser['mapping']
json_dict = {}
# Use mapping to setup fields for JSON message.
for k, v in day_consumption.items():
if k not in json_mapping:
continue
config_key = json_mapping[k]
json_dict[config_key] = v
json_data = json.dumps(json_dict, cls=serializers.json.DjangoJSONEncoder)
return [{
'topic': json_settings.topic,
'payload': json_data,
}]
def day_totals_per_topic(day_consumption, split_topic_settings):
""" Converts day consumption to split topic messages. """
config_parser = configparser.ConfigParser()
config_parser.read_string(split_topic_settings.formatting)
topic_mapping = config_parser['mapping']
mqtt_messages = []
# Use mapping to setup fields for each message/topic.
for k, v in day_consumption.items():
if k not in topic_mapping:
continue
mqtt_messages.append({
'topic': topic_mapping[k],
'payload': str(v),
})
return mqtt_messages
def publish_split_topic_meter_statistics():
""" Publishes meter statistics to a broker, formatted in a separate topic per field name, if set and enabled. """
split_topic_settings = meter_statistics.SplitTopicMeterStatisticsMQTTSettings.get_solo()
if not split_topic_settings.enabled:
return
# User specified formatting.
config_parser = configparser.ConfigParser()
config_parser.read_string(split_topic_settings.formatting)
topic_mapping = config_parser['mapping']
mqtt_messages = []
serialized_reading = json.loads(serializers.serialize('json', [MeterStatistics.get_solo()]))
reading_fields = dict(serialized_reading[0]['fields'].items())
reading_fields['id'] = serialized_reading[0]['pk']
# Copy all fields described in the mapping.
for k, v in reading_fields.items():
if k not in topic_mapping:
continue
mqtt_messages.append({
'topic': topic_mapping[k],
'payload': v,
})
broker_kwargs = get_broker_configuration()
try:
publish.multiple(msgs=mqtt_messages, **broker_kwargs)
except ValueError as error:
logger.error('MQTT publish_split_topic_meter_statistics() | {}'.format(error))
| [
"github@dennissiemensma.nl"
] | github@dennissiemensma.nl |
79534b04cc124bc0a45a563f8ce019c809409d7b | 88994e2e840a70ec702cee09e1a13813aa6f800c | /tests/models/observations/test_observations_input_files.py | bccc95a62ae1ed7dbee2e604e345dec01f022f35 | [] | no_license | Clinical-Genomics/cg | 1e9eb0852f742d555a48e8696914ebe177f7d436 | d2ec6d25b577dd6938bbf92317aeff1d6b3c5b08 | refs/heads/master | 2023-09-01T02:04:04.229120 | 2023-08-31T13:50:31 | 2023-08-31T13:50:31 | 82,567,026 | 19 | 8 | null | 2023-09-14T15:24:13 | 2017-02-20T14:29:43 | Python | UTF-8 | Python | false | false | 2,578 | py | """Test ObservationsInputFiles pydantic model behaviour."""
from pathlib import Path
import pytest
from pydantic import ValidationError
from cg.models.observations.input_files import (
MipDNAObservationsInputFiles,
BalsamicObservationsInputFiles,
)
def test_instantiate_input_files(observations_input_files_raw: dict):
"""Tests input files against a pydantic MipDNAObservationsInputFiles."""
# GIVEN a dictionary with the basic input files
# WHEN instantiating an observations input files object
input_files = MipDNAObservationsInputFiles(**observations_input_files_raw)
# THEN assert that it was successfully created
assert isinstance(input_files, MipDNAObservationsInputFiles)
def test_instantiate_input_files_missing_field(
observations_input_files_raw: dict, file_does_not_exist: Path
):
"""Tests input files against a pydantic MipDNAObservationsInputFiles with not existent field."""
# GIVEN a dictionary with the basic input files and a file path that does not exist
observations_input_files_raw["snv_vcf_path"] = file_does_not_exist
# WHEN checking the observation file
# THEN the file is not successfully validated and an error is returned
with pytest.raises(ValidationError):
# WHEN instantiating a ObservationsInputFiles object
MipDNAObservationsInputFiles(**observations_input_files_raw)
def test_instantiate_balsamic_input_files(balsamic_observations_input_files_raw: dict):
"""Tests input files against a pydantic BalsamicObservationsInputFiles."""
# GIVEN balsamic input files
# WHEN instantiating an observations input files object
input_files = BalsamicObservationsInputFiles(**balsamic_observations_input_files_raw)
# THEN assert that it was successfully created
assert isinstance(input_files, BalsamicObservationsInputFiles)
def test_instantiate_balsamic_input_files_missing_field(
balsamic_observations_input_files_raw: dict, file_does_not_exist: Path
):
"""Tests input files against a pydantic BalsamicObservationsInputFiles with not existent field."""
# GIVEN a dictionary with the basic input files and a file path that does not exist
balsamic_observations_input_files_raw["snv_germline_vcf_path"] = file_does_not_exist
# WHEN checking the observation file
# THEN the file is not successfully validated and an error is returned
with pytest.raises(ValidationError):
# WHEN instantiating a ObservationsInputFiles object
BalsamicObservationsInputFiles(**balsamic_observations_input_files_raw)
| [
"noreply@github.com"
] | Clinical-Genomics.noreply@github.com |
020d409514f60d8be97a9d22ef159566ead914e9 | 11b5de6bc38f1cf415ee2b743ce6e7da70e8ede3 | /bin/split-seqs-by-id | dadb98424ce1875063d78de427e1ce7f1f3ff1ca | [
"MIT"
] | permissive | mkcor/bioinformatics-hacks | b21c6e3e3de4a1e28e1b2da754bf186a3faeb088 | 8f0894b8a0cc5595c7c1605ab3551a16e65d0f06 | refs/heads/master | 2020-07-13T22:48:17.092651 | 2019-07-24T22:38:58 | 2019-07-24T22:38:58 | 205,172,284 | 0 | 0 | NOASSERTION | 2019-08-29T13:38:32 | 2019-08-29T13:38:31 | null | UTF-8 | Python | false | false | 917 | #!/usr/bin/env python
import argparse
import logging
from Bio import SeqIO
def parse_args():
"""
return arguments
>>> args = parse_args()
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--log", default="/dev/stderr", help="log file (default=stderr)"
)
parser.add_argument("--output", default="/dev/stdout")
parser.add_argument("--input", default="/dev/stdin")
return parser.parse_args()
def main():
"""
>>> main() # stuff happens
"""
args = parse_args()
logging.basicConfig(filename=args.log, level=logging.INFO)
with open(args.input) as handle:
records = SeqIO.parse(handle, "fasta")
for record in records:
out_file = "{}.fasta".format(record.id)
with open(out_file, "w") as output:
output.write(record.format("fasta"))
if __name__ == "__main__":
main()
| [
"harekrishna@gmail.com"
] | harekrishna@gmail.com | |
130fb9e238720cf124db67fb1ce0d4358ee70e22 | b4cf3438011c9521561143e677736c611ff19a0c | /setup.py | 41f10d1828fa03e1a62e28eb8ac19b63cc45e852 | [] | no_license | BUCT-Vision/boxx | 3e5c24af20c06d4943dc04859e6cbfb577fe8a48 | 3d405c9ad744d2ff9f6f5d9efb1e31962474565b | refs/heads/master | 2020-03-18T17:35:18.573106 | 2018-09-18T02:49:10 | 2018-09-18T02:49:10 | 135,037,392 | 2 | 0 | null | 2018-09-18T02:49:11 | 2018-05-27T10:44:44 | Python | UTF-8 | Python | false | false | 6,981 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from datetime import date
from setuptools import setup, find_packages
# --- import your package ---
#import boxx as package
packageName = 'boxx'
pyPath = '%s/__init__.py'%packageName if os.path.isdir(packageName) else '%s.py'%packageName
with open(pyPath) as f:
lines = f.readlines()
code = ''.join(filter(lambda l: l.startswith('__') and '=' in l, lines))
class Pack():
pass
package = Pack()
package.__name__ = packageName
exec(code, package.__dict__)
if __name__ == "__main__":
# --- Automatically generate setup parameters ---
# Your package name
PKG_NAME = package.__name__
# Your GitHub user name
try:
GITHUB_USERNAME = package.__github_username__
except:
GITHUB_USERNAME = "Unknown-Github-Username"
# Short description will be the description on PyPI
try:
SHORT_DESCRIPTION = package.__short_description__ # GitHub Short Description
except:
print(
"'__short_description__' not found in '%s.__init__.py'!" % PKG_NAME)
SHORT_DESCRIPTION = "No short description!"
# Long description will be the body of content on PyPI page
try:
LONG_DESCRIPTION = open("README.md", "rb").read().decode("utf-8")
except:
LONG_DESCRIPTION = "No long description!"
# Version number, VERY IMPORTANT!
VERSION = package.__version__
# Author and Maintainer
try:
AUTHOR = package.__author__
except:
AUTHOR = "Unknown"
try:
AUTHOR_EMAIL = package.__author_email__
except:
AUTHOR_EMAIL = None
try:
MAINTAINER = package.__maintainer__
except:
MAINTAINER = "Unknown"
try:
MAINTAINER_EMAIL = package.__maintainer_email__
except:
MAINTAINER_EMAIL = None
PACKAGES, INCLUDE_PACKAGE_DATA, PACKAGE_DATA, PY_MODULES = (
None, None, None, None,
)
# It's a directory style package
if os.path.exists(__file__[:-8] + PKG_NAME):
# Include all sub packages in package directory
PACKAGES = [PKG_NAME] + ["%s.%s" % (PKG_NAME, i)
for i in find_packages(PKG_NAME)]
# Include everything in package directory
INCLUDE_PACKAGE_DATA = None
PACKAGE_DATA = {
"": ["*.*"],
}
# It's a single script style package
elif os.path.exists(__file__[:-8] + PKG_NAME + ".py"):
PY_MODULES = [PKG_NAME, ]
# The project directory name is the GitHub repository name
repository_name = os.path.basename(os.path.dirname(__file__))
# Project Url
URL = "https://github.com/{0}/{1}".format(GITHUB_USERNAME, repository_name)
# Use todays date as GitHub release tag
github_release_tag = str(date.today())
# Source code download url
DOWNLOAD_URL = "https://github.com/{0}/{1}/tarball/{2}".format(
GITHUB_USERNAME, repository_name, github_release_tag)
try:
LICENSE = package.__license__
except:
print("'__license__' not found in '%s.__init__.py'!" % PKG_NAME)
LICENSE = ""
PLATFORMS = [
"Windows",
"MacOS",
"Unix",
]
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
]
# Read requirements.txt, ignore comments
try:
REQUIRES = list()
f = open("requirements.txt", "rb")
for line in f.read().decode("utf-8").split("\n"):
line = line.strip()
if "#" in line:
line = line[:line.find("#")].strip()
if line:
REQUIRES.append(line)
except:
print("'requirements.txt' not found!")
REQUIRES = list()
# from boxx import *
# setup = dicto
# tree-setup(
setup(
name=PKG_NAME,
description=SHORT_DESCRIPTION,
# long_description=LONG_DESCRIPTION,
long_description=SHORT_DESCRIPTION+'\nMore information on github: https://github.com/DIYer22/boxx',
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
packages=PACKAGES,
include_package_data=INCLUDE_PACKAGE_DATA,
# package_data=PACKAGE_DATA,
py_modules=PY_MODULES,
url='https://github.com/DIYer22/Box-X',
download_url='https://github.com/DIYer22/Box-X/archive/master.zip',
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
license=LICENSE,
install_requires=REQUIRES,
)
"""
Appendix
--------
::
Frequent used classifiers List = [
"Development Status :: 1 - Planning",
"Development Status :: 2 - Pre-Alpha",
"Development Status :: 3 - Alpha",
"Development Status :: 4 - Beta",
"Development Status :: 5 - Production/Stable",
"Development Status :: 6 - Mature",
"Development Status :: 7 - Inactive",
"Intended Audience :: Customer Service",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Information Technology",
"Intended Audience :: Legal Industry",
"Intended Audience :: Manufacturing",
"Intended Audience :: Other Audience",
"Intended Audience :: Religion",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: BSD License",
"License :: OSI Approved :: MIT License",
"License :: OSI Approved :: Apache Software License",
"License :: OSI Approved :: GNU General Public License (GPL)",
"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
"Natural Language :: English",
"Natural Language :: Chinese (Simplified)",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3 :: Only",
]
"""
| [
"ylxx@live.com"
] | ylxx@live.com |
f236269f4776ca93d30e415a57352e31115a6d08 | 63519b144e8b2d881c8f6e99b9c61aae5ab408ca | /resample_topo_file.py | ae26bfab26aea5740e8dc612baab9465dfbfedc0 | [] | no_license | kujaku11/sandbox_scripts | 667d260ef42c3fe90c9543e0a938fdb104368700 | 080003cdae3a14fec5178d3e7a854d142ef3948c | refs/heads/master | 2023-08-10T18:27:30.463398 | 2023-08-05T01:28:29 | 2023-08-05T01:28:29 | 75,033,125 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 15:06:46 2019
@author: jpeacock
"""
import numpy as np
fn = r"c:\Users\jpeacock\Documents\Geothermal\GabbsValley\gis\gv_topo.txt"
resample = 6
with open(fn, "r") as fid:
nx = int(fid.readline().split()[1].strip())
ny = int(fid.readline().split()[1].strip())
x = float(fid.readline().split()[1].strip())
y = float(fid.readline().split()[1].strip())
cell = float(fid.readline().split()[1].strip())
nil = float(fid.readline().split()[1].strip())
topo = np.zeros((nx / resample, ny / resample), dtype=np.float)
for ii in range(ny / resample):
try:
line = fid.readline()
topo[:, ii] = np.array(line.strip().split(), dtype=np.float)[::resample]
except ValueError as error:
raise ValueError(error)
for jj in range(resample - 1):
fid.readline()
topo[np.where(topo == -9999)] = 0
with open(fn[0:-4] + "_150m.txt", "w") as nfid:
header = []
header.append("{0:14}{1:.0f}".format("ncols", topo.shape[0]))
header.append("{0:14}{1:.0f}".format("nrows", topo.shape[1]))
header.append("{0:14}{1:.11f}".format("xllcorner", x))
header.append("{0:14}{1:.11f}".format("yllcorner", y))
header.append("{0:14}{1:.11f}".format("cellsize", cell * resample))
header.append("{0:14}{1:.0f}".format("NODATA_value", nil))
nfid.write("\n".join(header))
nfid.write("\n")
for kk in range(topo.shape[1]):
out = np.char.mod("%.6g", topo[:, kk])
nfid.write(" ".join(out))
nfid.write("\n")
| [
"peacock.jared@gmail.com"
] | peacock.jared@gmail.com |
c3dfaa0899e4dab4d82c33038a74506baebc221a | 560e212b000df60325d6a3cddd225aa4af69a8f8 | /authentication/models.py | bf9cc5520e5f043e3db7e6a26072d08777b76001 | [] | no_license | msrshahrukh100/Roba-Square-Website | c36964ee08536d6cfc1e2ced99c4cc61f5c03ace | 3cfcbfc47541ae387bef9fe1e06c4046131841ba | refs/heads/master | 2021-01-19T19:31:16.947352 | 2019-05-02T12:58:44 | 2019-05-02T12:58:44 | 88,420,401 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | from django.db import models
from django.contrib.auth.models import User
from autoslug import AutoSlugField
from authentication.username import get_user_name
from django.dispatch.dispatcher import receiver
from django.db.models.signals import post_save
from django.core.urlresolvers import reverse
from sorl.thumbnail import ImageField
from django.core.cache import cache
from django.contrib.auth.signals import user_logged_in, user_logged_out
def clear_the_cache(sender, user, request, **kwargs):
cache.clear()
user_logged_in.connect(clear_the_cache)
user_logged_out.connect(clear_the_cache)
# upload location for user profile pics
def upload_location_user(instance, filename) :
return "users/%s/%s" % (instance.user.id, filename)
# class for storing user information 1-1 qith the default user model
class UserInformation(models.Model) :
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='user_information')
change_profile_pic = ImageField(upload_to=upload_location_user,height_field='height_field',width_field='width_field',blank=True, null=True,default='default.jpg')
height_field = models.IntegerField(default=0)
width_field = models.IntegerField(default=0)
date_of_birth = models.CharField(max_length=20,blank=True, null=True)
phonenumber = models.CharField(max_length=15,blank=True, null=True)
profession = models.CharField(max_length=100, blank=True, null=True)
name_of_institute = models.CharField(max_length=200, blank=True, null=True)
showrecentlyviewed = models.BooleanField(default=True)
showfollowers = models.BooleanField(default=True)
showfollowing = models.BooleanField(default=True)
showdob = models.BooleanField(default=True)
slug = AutoSlugField(populate_from='user',unique=True)
def __unicode__(self) :
return str(self.user.username)
class Meta :
verbose_name = "User Information"
verbose_name_plural = "User Information"
def get_absolute_url(self):
return reverse("social:viewuser", kwargs={"slug": self.slug})
def get_image_url(self) :
if self.user.socialaccount_set.all().first() :
return self.user.socialaccount_set.all().first().get_avatar_url()
else :
return self.user.user_information.change_profile_pic
@receiver(post_save, sender=User)
def UserInformationreceiver(sender, instance, **kwargs):
UserInformation.objects.get_or_create(user=instance)
class Addresses(models.Model) :
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='addresses')
address = models.CharField(max_length=300)
city = models.CharField(max_length=30)
pincode = models.PositiveIntegerField()
nearest_landmark = models.CharField(max_length=200, null=True, blank=True)
def __unicode__(self) :
return self.user.username
def get_remove_url(self) :
return reverse("authentication:removeaddress", kwargs={'id':self.id})
| [
"msr.concordfly@gmail.com"
] | msr.concordfly@gmail.com |
3575db317ab710ec595dfe6bf58cde5c8976f25f | f5807a07ad72be79d4626ce9fe4adbf6d9f32fd8 | /base.py | 93a3986b8ab2d67b417169c5971b26987241751e | [] | no_license | 15101538237ren/papers_collecting | 74ddeb708502bf62dfdd5fd734a515e6fd73986b | 0e9c4e24a8edac6f77f27f7b1b53ea2c9069f652 | refs/heads/master | 2018-11-04T18:47:01.170290 | 2018-08-26T22:32:17 | 2018-08-26T22:32:17 | 115,414,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import urllib2, os
pkl_dir = 'pkl'
papers_dir = 'papers'
for dir_path in [pkl_dir, papers_dir]:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
paper_years = range(2013, 2018)
ip = '10.138.232.71'
port = '80'
timeout = 20
def request_url(paper_collection_name, url):
if paper_collection_name != "icml":
proxydict = {}
proxydict['http'] = "http://%s:%s"%(ip, port)
proxy_handler = urllib2.ProxyHandler(proxydict)
opener = urllib2.build_opener(proxy_handler)
opener.addheaders = [('User-agent', 'Mozilla/5.0'), ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),('Connection','keep-alive'),('Cookie','_ga=GA1.2.1314251887.1513847038; _gat=1; _gid=GA1.2.129016361.1514334078')]
urllib2.install_opener(opener)
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req,timeout=timeout)
return response.read()
except urllib2.URLError, e:
print e.reason
return 0
def schedule(a,b,c):
'''''
a:已经下载的数据块
b:数据块的大小
c:远程文件的大小
'''
per = 100.0 * a * b / c
if per > 100 :
per = 100
# print '%.2f%%' % per
| [
"renhongleiz@126.com"
] | renhongleiz@126.com |
1e9a333a9a9085b4606cf1e5bd53b40f54343772 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq860.py | 2953abe4bd2707aa6a6ffda17ae295e957dbe753 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,432 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=42
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[4])) # number=21
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=36
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=37
c.append(cirq.H.on(input_qubit[0])) # number=38
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=39
c.append(cirq.X.on(input_qubit[0])) # number=40
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=41
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=30
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=32
c.append(cirq.X.on(input_qubit[1])) # number=33
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=34
c.append(cirq.H.on(input_qubit[2])) # number=25
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=26
c.append(cirq.H.on(input_qubit[2])) # number=35
c.append(cirq.H.on(input_qubit[2])) # number=27
c.append(cirq.X.on(input_qubit[2])) # number=23
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=24
c.append(cirq.X.on(input_qubit[3])) # number=12
c.append(cirq.X.on(input_qubit[0])) # number=13
c.append(cirq.X.on(input_qubit[1])) # number=14
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.X.on(input_qubit[3])) # number=16
c.append(cirq.Z.on(input_qubit[1])) # number=31
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq860.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
7210e8c2d7640f4f2c4816aa750335f3e49bef62 | b76289396b22eda191f25744a600fac2abaf8850 | /hphp/tools/lldb/lookup.py | b2b6eaff45248b34068d08137fb0a176d2b21d29 | [
"MIT",
"PHP-3.01",
"Zend-2.0"
] | permissive | fengjixuchui/hhvm | cb8cece7afd025fb8cdf8479c2a0696f38730949 | bbbb1782fa258b8dd526ffc7e8ba0f6115931bff | refs/heads/master | 2023-03-15T15:55:46.355422 | 2023-01-27T13:59:08 | 2023-01-27T13:59:08 | 175,142,159 | 0 | 1 | NOASSERTION | 2021-11-03T11:22:20 | 2019-03-12T05:34:16 | C++ | UTF-8 | Python | false | false | 4,595 | py | import lldb
import shlex
try:
# LLDB needs to load this outside of the usual Buck mechanism
import idx
import utils
except ModuleNotFoundError:
import hhvm_lldb.idx as idx
import hhvm_lldb.utils as utils
def lookup_func(func_id: lldb.SBValue) -> lldb.SBValue:
""" Find the function corresponding to a given FuncID
Args:
func_id: A HPHP::FuncId wrapped in an lldb.SBValue
Returns:
func: A HPHP::Func* wrapped in an lldb.SBValue
"""
target = func_id.target
assert func_id.type == utils.Type("HPHP::FuncId", target), f"invalid func_id, type given is {func_id.type.name}"
func_vec = utils.Global("HPHP::Func::s_funcVec", target)
if func_vec.IsValid():
# Not LowPtr
func_id_val = utils.get(func_id, "m_id").unsigned
result = idx.atomic_low_ptr_vector_at(func_vec, func_id_val)
assert result.IsValid(), "returned invalid HPHP::Func"
else:
# TODO test this code path
# LowPtr
result = utils.rawptr(utils.get(func_id, 'm_id'))
func_ptr = result.Cast(utils.Type('HPHP::Func', target).GetPointerType())
assert func_ptr.IsValid(), "couldn't return HPHP::Func *"
return func_ptr
def lookup_func_from_frame_pointer(fp: lldb.SBValue) -> lldb.SBValue:
    """ Get the jitted function pointed to by the given frame pointer.

    Args:
        fp: Activation record (HPHP::ActRec)

    Returns:
        func: An SBValue representing a HPHP::Func*
    """
    # The activation record stores the callee's FuncId; resolve it.
    return lookup_func(utils.get(fp, 'm_funcId'))
class LookupCommand(utils.Command):
    """Implements the LLDB `lookup` command: resolve HHVM runtime objects by ID."""
    command = "lookup"
    description = "Look up HHVM runtime objects by ID"

    class ArgsNamespace: # noqa: B903
        # argparse will add attributes to this class
        # (it is passed as the `namespace=` target of parse_args() so the
        # subcommand callbacks can reach the execution context and the
        # command-result object alongside the parsed arguments)
        def __init__(self, exe_ctx: lldb.SBExecutionContext, result: lldb.SBCommandReturnObject):
            self.exe_ctx = exe_ctx
            self.result = result

    @classmethod
    def create_parser(cls):
        """Build the argparse parser, one subparser per lookup subcommand."""
        parser = cls.default_parser()
        subparsers = parser.add_subparsers(title="List of lookup subcommands")

        # `lookup func <funcid>`: resolve a FuncId to its Func*
        func_cmd = subparsers.add_parser(
            "func",
            help="Look up a Func* by its FuncId",
        )
        func_cmd.add_argument(
            "funcid",
            help="A HPHP::FuncId (i.e. int) uniquely identifying a HPHP::Func*"
        )
        func_cmd.set_defaults(func=cls._lookup_func_prep)

        # `lookup litstr <id> [unit]`: handler not implemented yet (see below)
        litstr_cmd = subparsers.add_parser(
            "litstr",
            help="Look up a litstr StringData* by its Id and Unit*",
            epilog="If no Unit is given, the current unit (set by `unit`) is used.",
        )
        litstr_cmd.add_argument(
            "id",
            help="The ID of the desired StringData (i.e. an HPHP::Id)",
        )
        litstr_cmd.add_argument(
            "unit",
            nargs="?",
            help="The unit to use",
        )
        litstr_cmd.set_defaults(func=cls._lookup_litstr_prep)
        return parser

    def __init__(self, debugger, internal_dict):
        # No state beyond what the base Command sets up.
        super().__init__(debugger, internal_dict)

    def __call__(self, debugger, command, exe_ctx, result):
        """Entry point invoked by LLDB with the raw command line in `command`."""
        namespace = self.ArgsNamespace(exe_ctx, result)
        command_args = shlex.split(command)
        try:
            options = self.parser.parse_args(command_args, namespace=namespace)
            # Dispatch to the handler registered via set_defaults(func=...)
            options.func(options)
        except SystemExit:
            # argparse calls sys.exit() on bad input; report to LLDB instead
            result.SetError("option parsing failed")
            return

    @classmethod
    def _lookup_func_prep(cls, options):
        """Handler for `lookup func`: evaluate the id expression, print the Func*."""
        func_id_type = utils.Type("HPHP::FuncId", options.exe_ctx.target)
        func_id = options.exe_ctx.frame.EvaluateExpression(options.funcid).Cast(func_id_type)
        res = lookup_func(func_id)
        # NOTE(review): lookup_func asserts on failure rather than returning
        # None, so this branch looks unreachable -- confirm before relying on it.
        if res is None:
            options.result.SetError(f"cannot get function identified with FuncId {func_id}")
            return
        options.result.write(str(res))

    @classmethod
    def _lookup_litstr_prep(cls, options):
        """Handler for `lookup litstr`: not implemented yet."""
        raise NotImplementedError
def __lldb_init_module(debugger, _internal_dict, top_module=""):
    """ Register the commands in this file with the LLDB debugger.

    LLDB looks for a function with this exact name when a script module is
    loaded; defining it here (in addition to the main hhvm module) lets this
    file be imported into LLDB on its own.

    Arguments:
        debugger: Current debugger object
        _internal_dict: Dict for current script session. For internal use by LLDB only.
        top_module: Name of the module this script is nested under, if any

    Returns:
        None
    """
    LookupCommand.register_lldb_command(debugger, __name__, top_module)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
e27d3ae4da61d77e608163c32861f19c4bc6f1e5 | 56bd9b3518f21080a0493f5330249bf5e85289fd | /patches_tool/aws_patch/aws_deps/libcloud/compute/drivers/joyent.py | dcf15dd66846793bd7529d7eb9cc9e240734edac | [
"Apache-2.0"
] | permissive | kevin-zhangsen/badam | da680bf8669722b5bc922381537bc4762fa5c228 | 6823f7dcd7c1b54c3b38edeffe59c16317598a2c | refs/heads/master | 2020-04-01T13:43:03.300155 | 2015-10-29T01:07:46 | 2015-10-29T01:07:46 | 45,371,347 | 2 | 0 | null | 2015-11-02T04:02:50 | 2015-11-02T04:02:47 | null | UTF-8 | Python | false | false | 8,019 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Joyent Cloud (http://www.joyentcloud.com) driver.
"""
import base64
# Prefer the faster third-party simplejson when available; otherwise fall
# back to the standard-library json module (API-compatible for this usage).
try:
    import simplejson as json
except ImportError:  # was a bare `except:`, which would mask unrelated errors
    import json
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.types import LibcloudError
from libcloud.compute.providers import Provider
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.compute.types import NodeState, InvalidCredsError
from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize
from libcloud.utils.networking import is_private_subnet
# Each region's API endpoint is "<region>" + this suffix (see __init__).
API_HOST_SUFFIX = '.api.joyentcloud.com'
# Sent with every request as the X-Api-Version header.
API_VERSION = '~6.5'

# Map Joyent machine states onto libcloud NodeState values.
# NOTE(review): both 'stopping' and 'stopped' map to TERMINATED here --
# presumably because this libcloud version lacks a distinct STOPPED state;
# confirm before changing.
NODE_STATE_MAP = {
    'provisioning': NodeState.PENDING,
    'running': NodeState.RUNNING,
    'stopping': NodeState.TERMINATED,
    'stopped': NodeState.TERMINATED,
    'deleted': NodeState.TERMINATED
}

# Joyent datacenters this driver may be pointed at.
VALID_REGIONS = [
    'us-east-1', 'us-east-2', 'us-east-3',
    'us-west-1',
    'us-sw-1',
    'eu-ams-1'
]
# Region used when the caller does not specify one.
DEFAULT_REGION = 'us-east-1'
class JoyentResponse(JsonResponse):
    """
    Response class for the Joyent driver.
    """
    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_error(self):
        # A 401 means bad credentials; surface it as InvalidCredsError.
        if self.status != httplib.UNAUTHORIZED:
            return self.body
        data = self.parse_body()
        raise InvalidCredsError(data['code'] + ': ' + data['message'])

    def success(self):
        # Only the explicitly whitelisted status codes count as success.
        return self.status in self.valid_response_codes
class JoyentConnection(ConnectionUserAndKey):
    """
    Connection class for the Joyent driver; authenticates with HTTP Basic.
    """
    responseCls = JoyentResponse
    allow_insecure = False

    def add_default_headers(self, headers):
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json; charset=UTF-8'
        headers['X-Api-Version'] = API_VERSION

        # HTTP Basic auth: base64("<user>:<key>")
        credentials = b('%s:%s' % (self.user_id, self.key))
        encoded = base64.b64encode(credentials).decode('utf-8')
        headers['Authorization'] = 'Basic %s' % encoded
        return headers
class JoyentNodeDriver(NodeDriver):
    """
    Joyent node driver class.
    """
    type = Provider.JOYENT
    name = 'Joyent'
    website = 'http://www.joyentcloud.com'
    connectionCls = JoyentConnection
    # Joyent returns a generated root password in machine metadata; it is
    # surfaced via extra['password'] in _to_node below.
    features = {'create_node': ['generates_password']}

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        """Validate the region and point the connection at its endpoint.

        Raises LibcloudError when ``region`` is not in VALID_REGIONS.
        """
        # Location is here for backward compatibility reasons
        if 'location' in kwargs:
            region = kwargs['location']
        if region not in VALID_REGIONS:
            msg = 'Invalid region: "%s". Valid region: %s'
            raise LibcloudError(msg % (region,
                                ', '.join(VALID_REGIONS)), driver=self)

        super(JoyentNodeDriver, self).__init__(key=key, secret=secret,
                                               secure=secure, host=host,
                                               port=port, region=region,
                                               **kwargs)
        # Regional endpoint, e.g. "us-east-1.api.joyentcloud.com"
        self.connection.host = region + API_HOST_SUFFIX

    def list_images(self):
        """List available images ("datasets" in the Joyent API)."""
        result = self.connection.request('/my/datasets').object
        images = []
        for value in result:
            extra = {'type': value['type'], 'urn': value['urn'],
                     'os': value['os'], 'default': value['default']}
            image = NodeImage(id=value['id'], name=value['name'],
                              driver=self.connection.driver, extra=extra)
            images.append(image)
        return images

    def list_sizes(self):
        """List available sizes ("packages" in the Joyent API)."""
        result = self.connection.request('/my/packages').object
        sizes = []
        for value in result:
            # Joyent packages have no id of their own; the name doubles as id.
            size = NodeSize(id=value['name'], name=value['name'],
                            ram=value['memory'], disk=value['disk'],
                            bandwidth=None, price=0.0,
                            driver=self.connection.driver)
            sizes.append(size)
        return sizes

    def list_nodes(self):
        """List all machines belonging to this account."""
        result = self.connection.request('/my/machines').object
        nodes = []
        for value in result:
            node = self._to_node(value)
            nodes.append(node)
        return nodes

    def reboot_node(self, node):
        """Reboot a node; returns True if the API accepted the request."""
        data = json.dumps({'action': 'reboot'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def destroy_node(self, node):
        """Destroy a node; returns True if the API confirmed the deletion."""
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         method='DELETE')
        return result.status == httplib.NO_CONTENT

    def create_node(self, **kwargs):
        """Create a machine; expects ``name``, ``size`` and ``image`` kwargs."""
        name = kwargs['name']
        size = kwargs['size']
        image = kwargs['image']

        data = json.dumps({'name': name, 'package': size.id,
                           'dataset': image.id})
        result = self.connection.request('/my/machines', data=data,
                                         method='POST')
        return self._to_node(result.object)

    def ex_stop_node(self, node):
        """
        Stop node

        :param node: The node to be stopped
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        data = json.dumps({'action': 'stop'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def ex_start_node(self, node):
        """
        Start node

        :param node: The node to be started
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        data = json.dumps({'action': 'start'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def ex_get_node(self, node_id):
        """
        Return a Node object based on a node ID.

        :param node_id: ID of the node
        :type node_id: ``str``

        :return: A Node object for the node
        :rtype: :class:`Node`
        """
        result = self.connection.request('/my/machines/%s' % (node_id))
        return self._to_node(result.object)

    def _to_node(self, data):
        """Convert a raw machine dict from the API into a libcloud Node."""
        state = NODE_STATE_MAP[data['state']]
        public_ips = []
        private_ips = []
        extra = {}

        # Split addresses into public/private by RFC1918 subnet membership.
        for ip in data['ips']:
            if is_private_subnet(ip):
                private_ips.append(ip)
            else:
                public_ips.append(ip)

        # The generated root password (if present) is exposed via extra.
        if 'credentials' in data['metadata']:
            extra['password'] = data['metadata']['credentials']['root']

        node = Node(id=data['id'], name=data['name'], state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self.connection.driver, extra=extra)
        return node
| [
"nash.xiejun@gmail.com"
] | nash.xiejun@gmail.com |
61d5e23c19070da8ec467f74852e7096b33ab380 | fcde32709c62b8ee86da459bb7c8eee52c848118 | /爬虫1905/day09/spider_day09_course/day09/Maoyan/Maoyan/settings.py | c4dbf33b7ab54d502aa651701fc4157d30dcb0bc | [] | no_license | klaus2015/py_base | 6b92d362c3d7dc0e09205a037f4d580381dac94d | ec32c731c1c2f6a0dab87f1d167397e4fa86b8de | refs/heads/master | 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,382 | py | # -*- coding: utf-8 -*-
# Scrapy settings for Maoyan project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Maoyan'

SPIDER_MODULES = ['Maoyan.spiders']
NEWSPIDER_MODULE = 'Maoyan.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Maoyan (+http://www.yourdomain.com)'

# Obey robots.txt rules
# NOTE(review): robots.txt is deliberately ignored here, and a generic
# browser User-Agent is set in DEFAULT_REQUEST_HEADERS further down.
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'INFO'
# Encoding used when exporting scraped items to a feed
# (translated from the original comment "设置导出编码")
FEED_EXPORT_ENCODING = 'utf-8'
# LOG_FILE = 'maoyan.log'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# every outgoing request carries these headers, including a generic
# browser-like User-Agent string.
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
  'User-Agent': 'Mozilla/5.0',
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Maoyan.middlewares.MaoyanSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'Maoyan.middlewares.MaoyanDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # Lower value runs earlier: the MySQL pipeline (200) processes each
    # item before MaoyanPipeline (300).
    'Maoyan.pipelines.MaoyanPipeline': 300,
    'Maoyan.pipelines.MaoyanMysqlPipeline' : 200,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# MySQL connection settings (translated from "定义mysql相关变量");
# presumably consumed by MaoyanMysqlPipeline -- confirm in pipelines.py.
MYSQL_HOST = 'localhost'
MYSQL_USER = 'root'
# NOTE(review): database credentials are hard-coded in source control;
# consider loading them from environment variables instead.
MYSQL_PWD = '123456'
MYSQL_DB = 'maoyandb'
CHARSET = 'utf8'
| [
"598467866@qq.com"
] | 598467866@qq.com |
cf4349e66001ff7285d399063205aebeb68ba443 | fe06311a7de13a02ca0be37d84c542c3cece3f33 | /Chapter38/file_38_2a.py | 8fd1b4c07a9deadcb2e9460ccbcdfe4b3dbce9c8 | [] | no_license | mooksys/Python_Algorithms | a4a84ddabc34ec4b7cc0ac01d55019880af38514 | 375817e3dfdec94411cf245fe3f685a69d92b948 | refs/heads/master | 2020-08-24T06:35:05.791979 | 2018-07-30T01:22:24 | 2018-07-30T01:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | def display_line():
print("-----------------------------")
# Main code (translated from "메인 코드"): print three Korean prompts --
# "Hello!", "How are you?", "What is your name?" -- each followed by a
# separator line.
print("안녕하세요!")
display_line()
print("어떻게 지내세요?")
display_line()
print("당신의 이름은 무엇입니까?")
display_line()
| [
"jeipubmanager@gmail.com"
] | jeipubmanager@gmail.com |
8f14c453c8eb93b243a01aaf1d3cbb6e7c511f2a | 3740de0d6e43ea140fc09ab314e4c492603ba185 | /scripts/sources/S_TruncatedLFM.py | 5972eb45ef92ce35b93b076421fd884df883530c | [
"MIT"
] | permissive | s0ap/arpmRes | 29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | refs/heads/master | 2022-02-16T05:01:22.118959 | 2019-08-20T16:45:02 | 2019-08-20T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_TruncatedLFM [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_TruncatedLFM&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-lfmtrunc).
# ## Prepare the environment
# +
import os.path as path
import sys

# Make the legacy helper-function directory importable (path is relative
# to this script's location).
sys.path.append(path.abspath('../../functions-legacy'))

import numpy as np
from numpy import array, zeros, diag, eye, r_

from numpy.linalg import pinv

import matplotlib.pyplot as plt

plt.style.use('seaborn')
# input parameters
n_ = 2  # target dimension
k_ = 1  # number of factors
# joint covariance of the target variables X and the factors Z
sig2_XZ = array([[4, 9.5, -1.8], [9.5, 25, -4.5], [-1.8, -4.5, 1]])
# -

# ## Compute optimal loadings
# +
sig_XZ = sig2_XZ[:n_, n_:n_ + k_]  # cross-covariance between X and Z
sig2_Z = sig2_XZ[n_:n_ + k_, n_:n_ + k_]  # factor covariance
b = sig_XZ @ pinv(sig2_Z)  # optimal loadings
# -

# ## Compute truncated joint covariance of residuals and factors
# m maps (X, Z) to (U, Z): the top block yields the residuals U = X - b@Z
m = np.vstack((np.hstack((eye(n_), -b)),
               np.hstack((zeros((k_, n_)), eye(k_)))))
sig2_UZ = m @ sig2_XZ @ m.T
# truncation: keep only the diagonal of the residual covariance block
sig2_UZtrunc = np.vstack((
    np.hstack((np.diagflat(diag(sig2_UZ[:n_, :n_])), zeros((n_, k_)))),
    np.hstack((zeros((k_, n_)), sig2_UZ[n_:n_ + k_, n_:n_ + k_]))))

# ## Compute truncated covariance of target variables
m_tilde = np.hstack((eye(n_), b))
sig2_Xtrunc = m_tilde @ sig2_UZtrunc @ m_tilde.T
| [
"dario.popadic@yahoo.com"
] | dario.popadic@yahoo.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.