blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d521724116b490a6181f5b3f286c4bc901268838 | 93ff3a214354128910c5c77824c64678d78e556d | /downloads/views.py | cd885baeb2cf1ca88ff849bf9cbe45dc2e079bad | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | samoKrajci/roots | 1fc2c7f205ba9dc0d9026026253c7349c3a551aa | 9c6bf6ed30e8e6ff9099e9dca6d56a2df2ef10b0 | refs/heads/master | 2021-09-23T02:20:17.927687 | 2017-01-01T19:05:22 | 2017-01-01T19:05:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | from sendfile import sendfile
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
@login_required
def download_protected_file(request, model_class, path_prefix, path):
    """
    This view allows download of the file at the specified path, if the user
    is allowed to. This is checked by calling the model's can_access_files
    method.

    :param request: current HttpRequest; the user is taken from request.user
    :param model_class: model class providing get_by_filepath() and
        can_access_files() used for the permission check
    :param path_prefix: media-relative directory prefix of the file
    :param path: file path beneath the prefix
    :raises PermissionDenied: when no rule grants access to the file
    """
    # filepath is the absolute path, mediapath is relative to media folder
    filepath = settings.SENDFILE_ROOT + path_prefix + path
    filepath_mediapath = path_prefix + path
    # NOTE(review): @login_required already guarantees an authenticated user,
    # so this check is redundant; the call form is_authenticated() is the
    # legacy (pre-Django-1.10) method style -- confirm the Django version.
    if request.user.is_authenticated():
        # Superusers can access all files
        if request.user.is_superuser:
            return sendfile(request, filepath)
        else:
            # We need to check can_access_files on particular instance
            obj = model_class.get_by_filepath(filepath_mediapath)
            if obj is not None and obj.can_access_files(request.user):
                return sendfile(request, filepath)
    raise PermissionDenied
| [
"tomasbabej@gmail.com"
] | tomasbabej@gmail.com |
b2604be8d025306b86a7c2085b36874e3b15e52b | 61b86155af0ba6b052172f1ec22fb16db85a4139 | /pii-detector/wpapper/address-generator/app.py | d1f33256c9599a944a762d727c92ac3ecbe06681 | [] | no_license | exsmiley/counter-dox-9001 | 010eedfd530ec9a51d98e2b1eba45c66256d8841 | 4306f78785b3f2a5c5c585c2a1ee40ecb117651b | refs/heads/master | 2021-03-19T18:40:28.571024 | 2018-02-18T11:02:52 | 2018-02-18T11:02:52 | 121,838,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | from flask import Flask
from faker import Faker
# Module-level Faker instance and Flask application object.
fake = Faker()
app = Flask(__name__)
@app.route('/fake')
def get_fake():
    """Return one randomly generated postal address as the response body."""
    return fake.address()
| [
"contact@papper.me"
] | contact@papper.me |
2bcab1d17e72142051b25c45ac0f9e27124939ed | fda60b266df8f793e7ad19fa9caaf6a149a84064 | /homework4/cham/hw4_1_3.py | 1666fe173c3a73bf36f77c6381a9838deadb38a2 | [] | no_license | hoangi19/tkdgtt | eb589b75438441c992a2126b59195740a64ba286 | 36f0f51930f119d0a3f7ee37f3584d8a55e914ee | refs/heads/master | 2022-10-30T00:35:04.238463 | 2020-06-10T12:18:50 | 2020-06-10T12:18:50 | 262,357,661 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | # Liet ke cac day nhi phan co do dai n
# Enumerate (print) all binary strings of length n, one per line.
n = int(input("Day nhi phan co do dai n = "))
a = [0]*n # buffer of n elements, all initialised to 0 (current string)
def lietke(i):
    """Recursively fix position i to 0 then 1; print when the string is full."""
    for v in [0,1]:
        a[i] = v
        if i==n-1:
            print(*a)
        else:
            lietke(i+1)
lietke(0)
| [
"viethoanghy99@gmail.com"
] | viethoanghy99@gmail.com |
310c8ae150190d6740b6121ace9773d0a661e430 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/11/32/13.py | c3ba2816bd0c1116751668d1ffb42f0e53b1d0e5 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 2,460 | py | filename = "B-large.in"
# Derive the output file name from the input name and open both files.
outputname = filename + "out.txt"
inFile = open(filename, 'r')
outFile = open(outputname, 'w')
# First line of the input holds the number of test cases.
numCases = int(inFile.readline())
def getTime(fullGapList, ttb, numStations):
    """Total travel time over the gaps in *fullGapList*.

    Travelling a gap normally costs twice its length.  After *ttb* time
    units, up to *numStations* boost stations can each halve the cost of
    one remaining gap (the largest ones are chosen greedily).
    """
    elapsed, idx = 0, 0
    total = sum(fullGapList) * 2
    # Walk gaps at normal speed until the build time has passed.
    while elapsed < ttb and idx < len(fullGapList):
        elapsed += fullGapList[idx] * 2
        idx += 1
    # All gaps were consumed before any station could help.
    if idx == len(fullGapList):
        return total
    remaining = list(fullGapList[idx:])
    # If the build finished mid-gap, the leftover half-portion of that gap
    # is also a candidate for a boost.
    if elapsed != ttb:
        remaining.append((elapsed - ttb) / 2)
    # Largest savings first; take at most numStations of them.
    ranked = sorted(remaining)[::-1]
    return total - sum(ranked[0:numStations])
# Python 2 script body: solve each test case and write "Case #k: answer".
for i in range(numCases):
    # Progress indicator on stdout (Python 2 print statement).
    print i
    nextLine = inFile.readline().split()
    numStations = int(nextLine[0])
    timeToBuild = int(nextLine[1])
    numStars = int(nextLine[2])
    numGaps = int(nextLine[3])
    # The remaining tokens on the line are the repeating gap pattern.
    gapList = []
    for j in range(numGaps):
        gapList += [int(nextLine[4+j])]
    # Repeat the pattern until there is one gap per star, then truncate.
    fullGapList = []
    while len(fullGapList) < numStars:
        fullGapList += gapList
    fullGapList = fullGapList[0:numStars]
    answer = getTime(fullGapList, timeToBuild, numStations)
    outFile.write("Case #" + str(i+1) + ": " + str(answer) + "\n")
inFile.close()
outFile.close()
def oneStation(fullGapList, pos, ttb):
    """Travel time with a single boost station placed at gap index *pos*.

    Gaps cost twice their length normally; the station halves the cost of
    its gap, but only for the portion travelled after time *ttb*.
    """
    before = 2 * sum(fullGapList[0:pos])
    after = 2 * sum(fullGapList[pos + 1:])
    gap = fullGapList[pos]
    if before > ttb:
        # Station is ready before we arrive: whole gap at half cost.
        boosted = before + gap
    elif before + 2 * gap < ttb:
        # Station never becomes ready while crossing: full cost.
        boosted = before + 2 * gap
    else:
        # Station becomes ready mid-gap: split the crossing.
        boosted = before + (ttb - before) / 2 + gap
    return boosted + after
def twoStation(fullGapList, pos1, pos2, ttb):
    """Travel time with boost stations at gap indices *pos1* and *pos2*.

    Same cost model as oneStation, applied sequentially: each station
    halves (part of) its own gap depending on whether *ttb* has elapsed.
    """
    def cross(start, gap):
        # Time after crossing `gap` when a station refreshes the boost here.
        if start > ttb:
            return start + gap
        if start + 2 * gap < ttb:
            return start + 2 * gap
        return start + (ttb - start) / 2 + gap
    t = 2 * sum(fullGapList[0:pos1])
    t = cross(t, fullGapList[pos1])
    t += 2 * sum(fullGapList[pos1 + 1:pos2])
    t = cross(t, fullGapList[pos2])
    return t + 2 * sum(fullGapList[pos2 + 1:])
| [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
1ac38f27c28376b90ec2804af78323ee6e419204 | a2a55863b02704abd82dcaf915c86256ed2f34ca | /rabbitmq/py/test1/recv.py | 82a06f0696025786b693c9c161a52b31e3f7661c | [] | no_license | evilsmile/OpenSourceLearn | 5e4859e78a514da4c24c3aae9c15d02dc86bc603 | e660f066f634a4ae3021dd390d187f53513f6d1b | refs/heads/master | 2020-04-07T01:17:39.527435 | 2019-06-14T09:21:15 | 2019-06-14T09:21:15 | 58,425,995 | 0 | 0 | null | 2019-10-31T11:04:47 | 2016-05-10T03:10:30 | Erlang | UTF-8 | Python | false | false | 405 | py | #!/usr/bin/python
import pika
# Python 2 script: connect to a local RabbitMQ broker and consume "hello".
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
# Declaring the queue is idempotent; it ensures the queue exists.
channel.queue_declare(queue='hello')
print "[*] Waiting for Messages. To exit press CTRL+C"
def callback(ch, method, properties, body):
    # Invoked by pika for every delivered message body.
    print "[X] Received %r" % (body, )
# no_ack=True: no acknowledgements, so messages are lost if we crash.
channel.basic_consume(callback, queue='hello', no_ack=True)
channel.start_consuming()
| [
"freedominmind@163.com"
] | freedominmind@163.com |
396fc78c6deed3bed86bd2e205d2cd7e3306ecc9 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/infra/rsvpcbndlgrp.py | 1881c27187fd0259b96e7e54cb6987147cafcdb7 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,141 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsVpcBndlGrp(Mo):
    """
    A source relation to the bundle interface group.

    Auto-generated Cisco ACI object-model (cobra) class: all behaviour is
    described declaratively in the class-level ``meta`` structure consumed
    by the cobra framework.  Generated code -- do not hand-edit the values.
    """
    # Relation meta: N:M source relation targeting infra:AccBndlGrp.
    meta = SourceRelationMeta("cobra.model.infra.RsVpcBndlGrp", "cobra.model.infra.AccBndlGrp")
    meta.cardinality = SourceRelationMeta.N_TO_M
    meta.moClassName = "infraRsVpcBndlGrp"
    meta.rnFormat = "rsvpcBndlGrp-[%(tDn)s]"
    meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
    meta.label = "PC/VPC Interface Policy Group"
    meta.writeAccessMask = 0x100000000001
    meta.readAccessMask = 0x100000000001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    # Containment wiring: allowed children / parents and class hierarchy.
    meta.childClasses.add("cobra.model.fabric.CreatedBy")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childNamesAndRnPrefix.append(("cobra.model.fabric.CreatedBy", "source-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
    meta.parentClasses.add("cobra.model.infra.NodeCfg")
    meta.superClasses.add("cobra.model.reln.Inst")
    meta.superClasses.add("cobra.model.fabric.NodeToPolicy")
    meta.superClasses.add("cobra.model.reln.To")
    meta.rnPrefixes = [
        ('rsvpcBndlGrp-', True),
    ]
    # ---- managed-object property metadata (generated) ----
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "deplSt", "deplSt", 15582, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("delivered", "delivered", 1)
    prop._addConstant("node-not-ready", "node-not-ready", 1073741824)
    prop._addConstant("none", "none", 0)
    prop._addConstant("not-registered-for-atg", "node-cannot-deploy-epg", 64)
    prop._addConstant("not-registered-for-fabric-ctrls", "node-not-controller", 16)
    prop._addConstant("not-registered-for-fabric-leafs", "node-not-leaf-for-fabric-policies", 4)
    prop._addConstant("not-registered-for-fabric-node-group", "node-not-registered-for-node-group-policies", 32)
    prop._addConstant("not-registered-for-fabric-oleafs", "node-not-capable-of-deploying-fabric-node-leaf-override", 2048)
    prop._addConstant("not-registered-for-fabric-ospines", "node-not-capable-of-deploying-fabric-node-spine-override", 4096)
    prop._addConstant("not-registered-for-fabric-pods", "node-has-not-joined-pod", 8)
    prop._addConstant("not-registered-for-fabric-spines", "node-not-spine", 2)
    prop._addConstant("not-registered-for-infra-leafs", "node-not-leaf-for-infra-policies", 128)
    prop._addConstant("not-registered-for-infra-oleafs", "node-not-capable-of-deploying-infra-node-leaf-override", 512)
    prop._addConstant("not-registered-for-infra-ospines", "node-not-capable-of-deploying-infra-node-spine-override", 1024)
    prop._addConstant("not-registered-for-infra-spines", "node-not-spine-for-infra-policies", 256)
    prop._addConstant("pod-misconfig", "node-belongs-to-different-pod", 8192)
    prop._addConstant("policy-deployment-failed", "policy-deployment-failed", 2147483648)
    meta.props.add("deplSt", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = True
    prop.defaultValueStr = "yes"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("forceResolve", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "monPolDn", "monPolDn", 15611, PropCategory.REGULAR)
    prop.label = "Monitoring policy attached to this observable object"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("monPolDn", prop)
    prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("local", "local", 3)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("service", "service", 2)
    meta.props.add("rType", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
    prop.label = "State"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unformed"
    prop._addConstant("cardinality-violation", "cardinality-violation", 5)
    prop._addConstant("formed", "formed", 1)
    prop._addConstant("invalid-target", "invalid-target", 4)
    prop._addConstant("missing-target", "missing-target", 2)
    prop._addConstant("unformed", "unformed", 0)
    meta.props.add("state", prop)
    prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
    prop.label = "State Qualifier"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("default-target", "default-target", 2)
    prop._addConstant("mismatch-target", "mismatch-target", 1)
    prop._addConstant("none", "none", 0)
    meta.props.add("stateQual", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "tCl", "tCl", 13161, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 4406
    prop.defaultValueStr = "infraAccBndlGrp"
    prop._addConstant("infraAccBndlGrp", None, 4406)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)
    prop = PropMeta("str", "tDn", "tDn", 13160, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("tDn", prop)
    prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("all", "all", 2)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("name", "name", 0)
    meta.props.add("tType", prop)
    # The relation is named by its target DN (tDn), which may itself
    # contain '/' characters, hence the bracket delimiter in rnFormat.
    meta.namingProps.append(getattr(meta.props, "tDn"))
    getattr(meta.props, "tDn").needDelimiter = True
    def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
        """Create the relation MO under *parentMoOrDn*, named by target DN *tDn*."""
        namingVals = [tDn]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
0ca7a55823766868ef8a426788307d7b230bcd74 | a042315d28c3e8914abdad8b3949f393a03d18c5 | /src/models/legendre_duality_toy_interp/model.py | 48f265d0acb6372a5ab13d4c9df312f117545a6a | [
"MIT"
] | permissive | lavoiems/NeuralWassersteinFlow | 76e80971a24f5f2d28023c18f81e5388194751f6 | b120778d75fc7afc9b6a56724768ab39ad7c0b91 | refs/heads/master | 2022-04-15T19:00:46.474142 | 2020-04-10T22:40:33 | 2020-04-10T22:40:33 | 225,720,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | import torch
from torch import nn
class Critic(nn.Module):
    """MLP critic scoring an input vector concatenated with a scalar t.

    Architecture: Linear(i_dim+1 -> h_dim) followed by three hidden
    Linear(h_dim -> h_dim) layers (ReLU activations) and a final
    Linear(h_dim -> 1) head.
    """
    def __init__(self, i_dim, h_dim, **kwargs):
        super(Critic, self).__init__()
        layers = [nn.Linear(i_dim+1, h_dim), nn.ReLU(inplace=True)]
        for _ in range(3):
            layers += [nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True)]
        layers.append(nn.Linear(h_dim, 1))
        self.x = nn.Sequential(*layers)
    def forward(self, x, t):
        """Return a score per batch element; shape (batch,) after squeeze."""
        return self.x(torch.cat((x, t), 1)).squeeze()
class Generator(nn.Module):
    """MLP generator mapping a latent (z, t) pair to an o_dim output.

    Architecture: Linear(o_dim+1 -> h_dim) followed by three hidden
    Linear(h_dim -> h_dim) layers (LeakyReLU 0.2) and a final
    Linear(h_dim -> o_dim) head.
    """
    def __init__(self, o_dim, z_dim, h_dim, **kwargs):
        super(Generator, self).__init__()
        # NOTE(review): the first layer is sized o_dim+1, so z is expected
        # to carry o_dim features; z_dim is unused here -- confirm intent.
        layers = [nn.Linear(o_dim+1, h_dim), nn.LeakyReLU(0.2, inplace=True)]
        for _ in range(3):
            layers += [nn.Linear(h_dim, h_dim), nn.LeakyReLU(0.2, inplace=True)]
        layers.append(nn.Linear(h_dim, o_dim))
        self.x = nn.Sequential(*layers)
    def forward(self, z, t):
        """Concatenate t onto z along dim 1 and run the MLP."""
        joined = torch.cat((z, t), 1)
        return self.x(joined)
| [
"samuel.lavoie.m@gmail.com"
] | samuel.lavoie.m@gmail.com |
ab4e73de38b60f3eace6b12186f95914e219c73f | 315d69e851b6faac40553c3c494f7681ffe783a3 | /KulliSharif/KulliSharifapp/serializers.py | a220f53fb0edd2796c425096163bac9c165766b5 | [
"MIT"
] | permissive | iqran-star/API-PROJECT | b6f1293f7b8fe45126ce96ddded2f1a2b94ca07f | 77d021098fd17b4ce086f8e6f914a7722fa9b558 | refs/heads/main | 2023-07-24T20:36:39.297576 | 2021-08-16T09:06:34 | 2021-08-16T09:06:34 | 396,709,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | from rest_framework import serializers
from .models import *
class EventSerializers(serializers.ModelSerializer):
    """Serialize every field of the Events model for the REST API."""
    class Meta:
        model=Events
        fields='__all__'
class ProjectSerializers(serializers.ModelSerializer):
    """Serialize every field of the Projects model for the REST API."""
    class Meta:
        model=Projects
        fields='__all__'
class VideoSerializers(serializers.ModelSerializer):
    """Serialize every field of the Videos model for the REST API."""
    class Meta:
        model=Videos
        fields='__all__'
class BooksSerializers(serializers.ModelSerializer):
    """Serialize every field of the Books model for the REST API."""
    class Meta:
        model=Books
        fields='__all__'
| [
"khan@gmail.com"
] | khan@gmail.com |
26e3adefa2e96ed18b13e2448b6fdad2336a3893 | cf6a9086157598a95458bdd4185c3ea89af74f75 | /newtest.py | 13674149afdfcbc75f819a1135c74af39b897ecd | [] | no_license | KyungrokH/testing- | b26811efbb549260a177307202551bb0211939bc | 38762224543d423b184486ad0eb428f93632eee9 | refs/heads/master | 2022-12-05T01:20:33.685789 | 2020-08-12T21:19:06 | 2020-08-12T21:19:06 | 287,074,902 | 0 | 0 | null | 2020-08-12T21:19:07 | 2020-08-12T17:29:20 | null | UTF-8 | Python | false | false | 64 | py | ## adding file in the new branch
print ("file in test_branch")
| [
"noreply@github.com"
] | noreply@github.com |
bb6602a7854840f66e247e8cbf478158d0ad52eb | 131d655a07e9341c18173d34c4bad7e214ed28d4 | /windowsandlabels.py | a8ae8b7a35b29aac2e0bd7f6ed1a65be8d189d2d | [] | no_license | Harsh200/pythontkinter | d5ca4329e36346393ac28ce83e9df8e6b61654eb | d81914fb95e3e18c933b77fab5efb2bcc19aba94 | refs/heads/master | 2020-04-24T10:46:28.534256 | 2019-02-21T17:01:16 | 2019-02-21T17:01:16 | 171,905,022 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | import tkinter
# Build a minimal Tk window that shows a single greeting label.
window=tkinter.Tk()
lbhello = tkinter.Label(window,text="Hello World")
# Bug fix: the original line was `lbhello.pack` which only references the
# bound method without calling it, so the label was never laid out and the
# window appeared empty.  pack() must be invoked.
lbhello.pack()
window.mainloop()
| [
"harsh.saxena_ccv17@gla.ac.in"
] | harsh.saxena_ccv17@gla.ac.in |
9bdb23c1d36a920a54a4a88f4c8e96b6ad9d5338 | 135705380fd6496c8629b31510622a72a86e4795 | /guestbook/__init__.py | 857d8ca01768ac3f30d21ebe110f281f8c5b0e48 | [] | no_license | c-nova/guestbook | aa1b577835ba291b95fa4fc2bfa24971205fec3c | c83c43957c0e06b091218e725a8a2a31e29e1174 | refs/heads/master | 2016-08-11T19:05:42.008387 | 2016-01-06T03:47:30 | 2016-01-06T03:47:30 | 49,036,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,738 | py | # coding: utf-8
import shelve
from datetime import datetime
from flask import Flask, request, render_template, redirect, escape, Markup
application = Flask(__name__)
DATA_FILE = 'guestbook.dat'
def save_data(name, comment, create_at):
    """Persist one guestbook post (newest first) in the shelve database.

    :param name: author name
    :param comment: comment body
    :param create_at: datetime of the post
    """
    database = shelve.open(DATA_FILE)
    # Fetch the stored list of posts, starting fresh when none exists yet.
    greeting_list = database.get('greeting_list', [])
    entry = {
        'name': name,
        'comment': comment,
        'create_at': create_at,
    }
    # The newest post goes to the front of the list.
    database['greeting_list'] = [entry] + greeting_list
    database.close()
def load_data():
    """Return the stored guestbook posts, newest first ([] when empty)."""
    database = shelve.open(DATA_FILE)
    posts = database.get('greeting_list', [])
    database.close()
    return posts
@application.route('/')
def index():
    """Top page: render the template with every stored post."""
    greeting_list = load_data()
    return render_template('index.html', greeting_list=greeting_list)
@application.route('/post', methods=['POST'])
def post():
    """Posting endpoint: store the submitted form data, then redirect."""
    # Pull the submitted fields out of the POST form.
    name = request.form.get('name') # author name
    comment = request.form.get('comment') # comment body
    create_at = datetime.now() # post timestamp (current time)
    # Persist the post.
    save_data(name, comment, create_at)
    # After saving, send the browser back to the top page.
    return redirect('/')
@application.template_filter('nl2br')
def nl2br_filter(s):
    """Template filter replacing newlines with <br> tags (input is escaped)."""
    return escape(s).replace('\n', Markup('<br>'))
@application.template_filter('datetime_fmt')
def datetime_fmt_filter(dt):
    """Template filter rendering a datetime object in a readable format."""
    return dt.strftime('%Y/%m/%d %H:%M:%S')
def main():
    """Run the development server on 127.0.0.1:8000."""
    application.run('127.0.0.1', 8000)
if __name__ == '__main__':
    # Run the application on 127.0.0.1 port 8000 with debug mode enabled.
    application.run('127.0.0.1', 8000, debug=True)
| [
"iguazudemo2@gmail.com"
] | iguazudemo2@gmail.com |
c3ab9466356c578a57a12b37f52986a52084c61f | 7ac11fbd946902189cad24b893128003e3df8211 | /instance/config.py | 1c955fa164465b3af0ebfab8cf58aa6d4d3b610b | [] | no_license | maxwellkimutai/Issue-Tracker-Flask | 6208b9712f767b09ce2f4026eb17eb3addfe10f6 | b1b9b2b50f8db700a10f99936f157654f596f4e4 | refs/heads/master | 2020-04-12T09:10:10.584044 | 2018-12-17T14:58:55 | 2018-12-17T14:58:55 | 154,103,371 | 0 | 4 | null | 2018-10-25T07:05:31 | 2018-10-22T07:32:37 | Python | UTF-8 | Python | false | false | 24 | py | SECRET_KEY='trackerkey'
| [
"maxwellkimutai@gmail.com"
] | maxwellkimutai@gmail.com |
3bb95ced81396f906f7822e77e1d040cd8901b31 | d33b2ce08591d23b06ab466f5dd6e302e3d4af2f | /fgcz_biobeamer.py.bak | 36ca192e64d75ab8a4deb5d3140b2ca66875ef94 | [] | no_license | Python3pkg/BioBeamer | 8b5fceb94664dbe7ce15603276f9628bbe6d25ca | 61dc1299fb47ece91ff9a7d333149cb2bfd500f3 | refs/heads/master | 2021-01-21T09:28:49.967511 | 2017-05-18T06:10:05 | 2017-05-18T06:10:05 | 91,655,529 | 0 | 0 | null | 2017-05-18T06:10:03 | 2017-05-18T06:10:03 | null | UTF-8 | Python | false | false | 1,595 | bak | #!/usr/bin/python
# -*- coding: latin1 -*-
"""
Copyright 2006-2015 Functional Genomics Center Zurich
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Author / Maintainer: Christian Panse <cp@fgcz.ethz.ch>, Witold E. Wolski <wew@fgcz.ethz.ch>
"""
# pip install PyFGCZ
import biobeamer
import sys
import socket
import time
# Base URL where the BioBeamer XML configuration and its XSD schema live.
configuration_url = "http://fgcz-s-021.uzh.ch/config/"
if __name__ == "__main__":
    print( "hostname is {0}.".format(socket.gethostname()))
    # Copy robot: fetches its parameters from the central XML/XSD config.
    bio_beamer = biobeamer.Robocopy()
    biobeamer_xsd = "{0}/BioBeamer.xsd".format(configuration_url)
    biobeamer_xml = "{0}/BioBeamer.xml".format(configuration_url)
    bio_beamer.para_from_url(xsd=biobeamer_xsd,
                             xml=biobeamer_xml)
    bio_beamer.run()
    time.sleep(5)
    # Second pass verifies the copied files against the same configuration.
    BBChecker = biobeamer.Checker()
    BBChecker.para_from_url(xsd=biobeamer_xsd,
                            xml=biobeamer_xml)
    BBChecker.run()
    sys.stdout.write("done. exit 0\n")
    time.sleep(5)
    sys.exit(0)
| [
"raliclo@gmail.com"
] | raliclo@gmail.com |
0d7f42721b6eacdb5d33d48f5de654197b6e11ed | 750684eb0746831a0a386563c1741b7d77688be3 | /Code/Practice/Django/nsurds/rds/serializers.py | 89c2e6e1923f7a391a892de8c0114629b4cd4eb6 | [] | no_license | NSU-FA20-CSE299-2/Group04 | baa8b9135c6dd1dccc58cc3d57eb3687f3f33ab2 | fad1a7d158cb9098aa8b502a9e8a9a464323e44e | refs/heads/main | 2023-02-21T11:37:19.250373 | 2021-01-19T09:09:38 | 2021-01-19T09:09:38 | 309,945,555 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from rest_framework import serializers
from .models import student
class studentSerializer(serializers.ModelSerializer):
class Meta:
model = student
fields = '__all__'
| [
"abdullah.mahfuj@northsouth.edu"
] | abdullah.mahfuj@northsouth.edu |
7f2cd7da5bd2d83385ca30d7cfaefa4f25d1f118 | d30e6c0e68955f27f1f81c898283c36b1f21954d | /my_pipeline.py | cdbd57e28392c109c3d3a524763eafead64eaf9e | [] | no_license | chStaiger/ACES | 0d9bb8ad5cf435a0b5ae6a8fe756b458026b78a1 | 33892fb2562cdb5b3784545ceb103a3c36db2168 | refs/heads/master | 2021-01-10T13:28:39.974974 | 2016-03-08T15:14:11 | 2016-03-08T15:14:11 | 53,335,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,817 | py | # @Author
# Christine Staiger
# staiger@cwi.nl; staigerchristine@gmail.com
# July 2013
from picas.clients import CouchClient
from couchdb import Server
from picas.modifiers import BasicTokenModifier
from picas.actors import RunActor
from picas.iterators import BasicViewIterator
from SetUpGrid import SetUpRun, RunInstance, splitData
class ExampleActor(RunActor):
    """PiCaS actor (Python 2): runs one ACES cross-validation task per token.

    Each CouchDB token describes a dataset/network/method combination plus
    the (repeat, fold) of an outer CV loop and optionally an inner CV fold.
    """
    def __init__(self, iterator, modifier, nrFolds):
        self.iterator = iterator
        self.modifier = modifier
        self.client = iterator.client
        self.db = self.client.db
        # Number of folds used for both outer and inner cross-validation.
        self.nrFolds = nrFolds
    def prepare_env(self, *kargs, **kvargs):
        # No per-environment setup needed.
        pass
    def prepare_run(self, *kargs, **kvargs):
        # No per-run setup needed.
        pass
    def process_token(self, ref, token):
        """Run the experiment described by *token* and store its output."""
        dataset = token['input']['dataset']
        network = token['input']['network']
        method = token['input']['method']
        specific = token['input']['specific']
        repeat = token['input']['repeat']
        fold = token['input']['fold']
        shuffleNr = token['input']['shuffleNr']
        print 'dataset:', dataset
        print 'network', network
        print 'method', method
        print 'specific', specific
        print 'repeat', repeat
        print 'fold', fold
        print 'shuffleNr', shuffleNr
        # A token with an 'innerFold' key requests a nested (inner) CV run.
        innerCV = False
        if 'innerFold' in token['input']:
            innerCV = True
            innerfold = token['input']['innerFold']
            innerrepeat = token['input']['innerRepeat']
        (data, net, featureSelector, classifiers, Dataset2Time) = SetUpRun(dataset, network, method)
        if not innerCV:
            # Outer CV: run directly on the full data split.
            if specific == True or specific == False:
                (dataName, featureExtractorproductName, netName, shuffle, featureExtractor, AucAndCi) = RunInstance(data, net,
                    featureSelector, specific, classifiers, repeat, self.nrFolds, fold, shuffleNr, Dataset2Time, specific)
            else:
                (dataName, featureExtractorproductName, netName, shuffle, featureExtractor, AucAndCi) = RunInstance(data, net,
                    featureSelector, specific, classifiers, repeat, self.nrFolds, fold, shuffleNr, Dataset2Time)
        else:
            # Inner CV: split off the outer training set first, then run the
            # inner fold on that training portion only.
            dsOuterTraining, dsOuterTesting,_ = splitData(data, repeat, fold, self.nrFolds)
            print 'dsOuterTraining', dsOuterTraining
            print 'dsOuterTesting', dsOuterTesting
            if specific == True or specific == False:
                (dataName, featureExtractorproductName, netName, shuffle, featureExtractor, AucAndCi) = RunInstance(dsOuterTraining, net,
                    featureSelector, specific, classifiers, innerrepeat, self.nrFolds, innerfold, shuffleNr, Dataset2Time, specific)
            else:
                (dataName, featureExtractorproductName, netName, shuffle, featureExtractor, AucAndCi) = RunInstance(dsOuterTraining, net,
                    featureSelector, specific, classifiers, innerrepeat, self.nrFolds, innerfold, shuffleNr, Dataset2Time)
        # Mark the token done and write the results back to CouchDB.
        token = self.modifier.close(token)
        token['output'] = (dataName, featureExtractorproductName, netName, shuffleNr, shuffle, featureExtractor, AucAndCi)
        self.db[token['_id']] = token
    def cleanup_run(self, *kargs, **kvargs):
        # No per-run cleanup needed.
        pass
    def cleanup_env(self, *kargs, **kvargs):
        # No per-environment cleanup needed.
        pass
def main():
    """Connect to the PiCaS CouchDB token pool and process 'run1/todo' tokens."""
    nrFolds = 5
    # Placeholder credentials -- substitute the real CouchDB endpoint.
    client = CouchClient(url="http://<server>:<port>", db="<dbname>", username="<username>", password="***")
    # NOTE(review): a commented-out line containing real-looking credentials
    # was removed here; never keep secrets in source control.
    modifier = BasicTokenModifier()
    iterator = BasicViewIterator(client, 'run1/todo', modifier)
    actor = ExampleActor(iterator, modifier, nrFolds)
    actor.run()
if __name__ == '__main__':
    main()
| [
"christine.staiger@surfsara.nl"
] | christine.staiger@surfsara.nl |
a659a53ddd947fc5faaa1ec8face985450084653 | 84b000ed863b5899d1aef9415df3852e4775dd72 | /Conditional_Statements_Advanced/Exercise/P01Cinema.py | 981b0dfbe7f16a7fb2c3725d10f2dadeeacad93f | [] | no_license | jeniia/Python_Basic_SoftUni_2021 | 8dce327056b4b32807fbfe339d1df7ce1517cc38 | c1ad138167ea9530a376e128849176e76144c878 | refs/heads/main | 2023-04-29T03:11:48.323885 | 2021-05-21T15:47:37 | 2021-05-21T15:47:37 | 369,575,459 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | screening_type = input()
rows = int(input())
columns = int(input())
income = 0
cinema_capacity = rows * columns
if screening_type == "Premiere":
income = cinema_capacity * 12.00
elif screening_type == "Normal":
income = cinema_capacity * 7.50
elif screening_type == "Discount":
income =cinema_capacity * 5.00
print(f"{income:.2f} leva") | [
"82292054+jeniia@users.noreply.github.com"
] | 82292054+jeniia@users.noreply.github.com |
3d4f973e6319ac02322d9c9e19a44bb5e11c4a74 | 360c777a2b77be466b1cf7c8fd74d6fd04f56b55 | /nexus_auth/models/ping.py | 7afa266c0b95f5154844d34bf4d67367e1726a27 | [
"MIT"
] | permissive | hreeder/nexus-auth | 790a3b2623ddf443138a4b0f0af1380dbc4db8ae | 8d51aef01647e32ba4a284f02de73a2caad7cf49 | refs/heads/master | 2021-01-10T10:08:37.190558 | 2016-02-29T12:27:21 | 2016-02-29T12:27:21 | 52,789,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | from nexus_auth import db
from nexus_auth.models.groups import Group
TYPE_SERVER = 0
TYPE_GROUP = 1
class PingServer(db.Model):
    """A named collection of servers that can be targeted by a ping."""
    id = db.Column(db.Integer, primary_key=True)
    # Text blob listing the member servers.  NOTE(review): the storage
    # format (one per line? comma separated?) is not visible here -- confirm.
    servers = db.Column(db.Text)
    display_name = db.Column(db.String(64))
class PingTarget(db.Model):
    """A ping destination owned by a group: either a server set or a group.

    ``type`` is one of TYPE_SERVER / TYPE_GROUP and decides which table
    ``target`` is resolved against.
    """
    id = db.Column(db.Integer, primary_key=True)
    parent_group_id = db.Column(db.Integer, db.ForeignKey('group.id'))
    type = db.Column(db.SmallInteger)
    target = db.Column(db.Integer)

    def get_target_name(self):
        """Return the bare name of the server or group this target points at."""
        if self.type == TYPE_SERVER:
            server = PingServer.query.filter_by(id=self.target).first()
            return server.display_name
        elif self.type == TYPE_GROUP:
            group = Group.query.filter_by(id=self.target).first()
            return group.name

    def get_target_representation(self):
        """Return the target name prefixed with its kind, e.g. "Server: X".

        Refactor: delegates to get_target_name() instead of duplicating the
        same query logic (the two methods previously diverged only in the
        prefix, a maintenance hazard).
        """
        if self.type == TYPE_SERVER:
            return "Server: " + self.get_target_name()
        elif self.type == TYPE_GROUP:
            return "Group: " + self.get_target_name()

    def get_group(self):
        """Return the Group that owns this ping target."""
        return Group.query.filter_by(id=self.parent_group_id).first()
| [
"harry@harryreeder.co.uk"
] | harry@harryreeder.co.uk |
8f143a7d68ae3d9c5a95915c75116b1bf55deea6 | 2a03035b0ff1585a7b429bd8be4dc70b25ca4577 | /Python/HackerRank/Cloudgame.py | 790ebe4f2f4acc5d5de277975800f86f940a764e | [] | no_license | prtkmishra/AI_ML_Learning | 6be783a7b695681d397c21bd719e608b25524c7f | 46f7b55508bf3f4c7e39c497f143bf8ea7e63490 | refs/heads/master | 2023-06-29T23:30:54.309049 | 2021-08-02T13:42:44 | 2021-08-02T13:42:44 | 276,331,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,622 | py |
class Cloudgame:
    """Jumping on the Clouds (HackerRank).

    ``c`` is a list of binary integers: 0 marks a safe cumulus cloud, 1 a
    thundercloud that must be avoided.  From each cloud the player may jump
    to position +1 or +2; the puzzle guarantees the last cloud is reachable.

    Samples:
        [0, 0, 1, 0, 0, 1, 0] -> 4 jumps
        [0, 0, 0, 0, 1, 0]    -> 3 jumps
    """

    def __init__(self, c):
        self.c = c

    def jumpingOnClouds(self):
        """Return the minimum number of jumps needed to reach the last cloud.

        Greedy strategy: a +2 jump is always taken when the cloud two ahead
        is safe, otherwise a +1 jump is taken; for winnable inputs this is
        optimal.  Fixes the original version, which documented an int return
        but only printed the count and returned None, and simplifies its
        convoluted "visitedcloud" bookkeeping.
        """
        jumps = 0
        position = 0
        last = len(self.c) - 1
        while position < last:
            # Prefer the long jump whenever its landing cloud is safe.
            if position + 2 <= last and self.c[position + 2] == 0:
                destination = position + 2
            else:
                destination = position + 1
            print("Jumped from cloud {} to cloud {} ".format(position, destination))
            position = destination
            jumps += 1
        print("Total number of jumps in the game: ", jumps)
        return jumps
| [
"mishra.eric@gmail.com"
] | mishra.eric@gmail.com |
cd5bbdded967c000f91d69083bde75bb9d01067e | e4a4859e588efbadbb18d796b48db772645a008d | /blog/views.py | 0b1ccf7691cd7039f579182f370ef5f66bdbad81 | [] | no_license | majorsigma/mysite | d2f4c8343231bfaa8ad830640e52f6d9bdc1808b | aec90d5696a93f9a70ffb29be3734be33ee26cfd | refs/heads/master | 2023-04-11T09:21:53.157049 | 2021-04-27T06:36:24 | 2021-04-27T06:36:24 | 357,853,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,669 | py | from django.shortcuts import render, get_object_or_404
from .models import Post, Comment
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.generic import ListView
from .forms import EmailPostForm, CommentForm
from django.core.mail import send_mail
from taggit.models import Tag
from django.db.models import Count
def post_list(request, tag_slug=None):
    """Render the paginated list of published posts, optionally filtered
    by the tag identified by *tag_slug* (404 if the tag does not exist)."""
    queryset = Post.published.all()
    tag = None
    if tag_slug:
        tag = get_object_or_404(Tag, slug=tag_slug)
        queryset = queryset.filter(tags__in=[tag])
    paginator = Paginator(queryset, 3)  # 3 posts per page
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # No page number (or a non-numeric one): show the first page.
        posts = paginator.page(1)
    except EmptyPage:
        # Page number beyond the end: show the last page instead.
        posts = paginator.page(paginator.num_pages)
    context = {'page': page,
               'posts': posts,
               'tag': tag}
    return render(request, 'blog/post/list.html', context)
def post_detail(request, year, month, day, post):
    """Show one published post with its active comments, a comment form,
    and up to four similar posts (shared tags, most overlap first)."""
    post = get_object_or_404(
        Post,
        slug=post,
        status='published',
        publish__year=year,
        publish__month=month,
        publish__day=day,)
    # Active comments attached to this post.
    comments = post.comments.filter(active=True)
    new_comment = None
    if request.method == 'POST':
        # A comment was submitted; validate and persist it.
        comment_form = CommentForm(data=request.POST)
        if comment_form.is_valid():
            # Build the Comment without saving, attach the post, then save.
            new_comment = comment_form.save(commit=False)
            new_comment.post = post
            new_comment.save()
    else:
        comment_form = CommentForm()
    # Similar posts: share at least one tag, ranked by tag overlap then recency.
    post_tags_ids = post.tags.values_list('id', flat=True)
    similar_posts = (Post.published.filter(tags__in=post_tags_ids)
                     .exclude(id=post.id)
                     .annotate(same_tags=Count('tags'))
                     .order_by('-same_tags', '-publish')[:4])
    context = {
        'post': post,
        'comments': comments,
        'new_comment': new_comment,
        'comment_form': comment_form,
        'similar_posts': similar_posts,
    }
    return render(request, 'blog/post/detail.html', context)
class PostListView(ListView):
    """Class-based alternative to post_list (no tag filtering)."""
    queryset = Post.published.all()
    context_object_name = 'posts'
    paginate_by = 3
    template_name = 'blog/post/list.html'
def post_share(request, post_id):
    """Share a published post by email.

    GET renders an empty EmailPostForm; POST validates it and, on success,
    sends a recommendation email and re-renders the page with ``sent=True``.
    """
    # Retrieve post by id
    post = get_object_or_404(
        Post,
        id=post_id,
        status='published'
    )
    sent = False
    if request.method == 'POST':
        # Form was submitted
        form = EmailPostForm(request.POST)
        if form.is_valid():
            # Form fields passed validation
            cd = form.cleaned_data
            # Build an absolute link to the post for the email body.
            post_url = request.build_absolute_uri(post.get_absolute_url())
            subject = '{} ({}) recommends you reading "{}"'.format(cd['name'], cd['email'], post.title)
            message = 'Read "{}" at {}\n\n{}\'s comments {}'.format(post.title, post_url, cd['name'], cd['comments'])
            send_mail(subject, message, 'admin@myblog.com', [cd['to']])
            sent = True
    else:
        form = EmailPostForm()
    return render(
        request,
        'blog/post/share.html',
        {
            'post': post,
            'form': form,
            'sent': sent,
        }
) | [
"olalekan.o.ogundele@gmail.com"
] | olalekan.o.ogundele@gmail.com |
42e0edb633c9498ca865bd735ff7de4fec5a8333 | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /hr_payroll_account/models/hr_payroll_account.py | 8b32bbc1e6f2d79fb1556864a63d4f9623022933 | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,589 | py | #-*- coding:utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_is_zero
class HrPayslipLine(models.Model):
    _inherit = 'hr.payslip.line'

    def _get_partner_id(self, credit_account):
        """Return the partner_id to put on the account.move.line for this
        slip line, or False when no partner applies.

        The partner comes from the salary rule's register when set,
        otherwise from the employee's home address — but only when a
        register partner exists or the relevant account is of a
        receivable/payable type.
        """
        # use partner of salary rule or fallback on employee's address
        register_partner_id = self.salary_rule_id.register_id.partner_id
        partner_id = register_partner_id.id or self.slip_id.employee_id.address_home_id.id
        # Pick the account that matches the side of the entry being built.
        if credit_account:
            account = self.salary_rule_id.account_credit
        else:
            account = self.salary_rule_id.account_debit
        if register_partner_id or account.internal_type in ('receivable', 'payable'):
            return partner_id
        return False
class HrPayslip(models.Model):
    """Payslip extended with accounting: each validated slip generates a
    journal entry in the configured salary journal."""
    _inherit = 'hr.payslip'
    date = fields.Date('Date Account', states={'draft': [('readonly', False)]}, readonly=True,
        help="Keep empty to use the period of the validation(Payslip) date.")
    journal_id = fields.Many2one('account.journal', 'Salary Journal', readonly=True, required=True,
        states={'draft': [('readonly', False)]}, default=lambda self: self.env['account.journal'].search([('type', '=', 'general')], limit=1))
    move_id = fields.Many2one('account.move', 'Accounting Entry', readonly=True, copy=False)
    @api.model
    def create(self, vals):
        # Allow callers to force the journal through the context.
        if 'journal_id' in self.env.context:
            vals['journal_id'] = self.env.context.get('journal_id')
        return super(HrPayslip, self).create(vals)
    @api.onchange('contract_id')
    def onchange_contract(self):
        # Take the contract's journal; fall back to the field default when
        # no contract is selected.
        super(HrPayslip, self).onchange_contract()
        self.journal_id = self.contract_id.journal_id.id or (not self.contract_id and self.default_get(['journal_id'])['journal_id'])
    @api.multi
    def action_payslip_cancel(self):
        # Cancel posted moves before deleting them, then cancel the slips.
        moves = self.mapped('move_id')
        moves.filtered(lambda x: x.state == 'posted').button_cancel()
        moves.unlink()
        return super(HrPayslip, self).action_payslip_cancel()
    @api.multi
    def action_payslip_done(self):
        """Validate the slips and create one posted journal entry per slip.

        For each salary-rule line a debit and/or credit move line is built;
        any residual imbalance is closed with an adjustment line on the
        journal's default account.
        """
        res = super(HrPayslip, self).action_payslip_done()
        for slip in self:
            line_ids = []
            debit_sum = 0.0
            credit_sum = 0.0
            date = slip.date or slip.date_to
            currency = slip.company_id.currency_id
            name = _('Payslip of %s') % (slip.employee_id.name)
            move_dict = {
                'narration': name,
                'ref': slip.number,
                'journal_id': slip.journal_id.id,
                'date': date,
            }
            for line in slip.details_by_salary_rule_category:
                # Credit notes invert the sign of every line total.
                amount = currency.round(slip.credit_note and -line.total or line.total)
                if currency.is_zero(amount):
                    continue
                debit_account_id = line.salary_rule_id.account_debit.id
                credit_account_id = line.salary_rule_id.account_credit.id
                if debit_account_id:
                    # Negative amounts land on the credit column instead.
                    debit_line = (0, 0, {
                        'name': line.name,
                        'partner_id': line._get_partner_id(credit_account=False),
                        'account_id': debit_account_id,
                        'journal_id': slip.journal_id.id,
                        'date': date,
                        'debit': amount > 0.0 and amount or 0.0,
                        'credit': amount < 0.0 and -amount or 0.0,
                        'analytic_account_id': line.salary_rule_id.analytic_account_id.id,
                        'tax_line_id': line.salary_rule_id.account_tax_id.id,
                    })
                    line_ids.append(debit_line)
                    debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
                if credit_account_id:
                    credit_line = (0, 0, {
                        'name': line.name,
                        'partner_id': line._get_partner_id(credit_account=True),
                        'account_id': credit_account_id,
                        'journal_id': slip.journal_id.id,
                        'date': date,
                        'debit': amount < 0.0 and -amount or 0.0,
                        'credit': amount > 0.0 and amount or 0.0,
                        'analytic_account_id': line.salary_rule_id.analytic_account_id.id,
                        'tax_line_id': line.salary_rule_id.account_tax_id.id,
                    })
                    line_ids.append(credit_line)
                    credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
            # Balance the entry: add an adjustment on whichever side is short.
            if currency.compare_amounts(credit_sum, debit_sum) == -1:
                acc_id = slip.journal_id.default_credit_account_id.id
                if not acc_id:
                    raise UserError(_('The Expense Journal "%s" has not properly configured the Credit Account!') % (slip.journal_id.name))
                adjust_credit = (0, 0, {
                    'name': _('Adjustment Entry'),
                    'partner_id': False,
                    'account_id': acc_id,
                    'journal_id': slip.journal_id.id,
                    'date': date,
                    'debit': 0.0,
                    'credit': currency.round(debit_sum - credit_sum),
                })
                line_ids.append(adjust_credit)
            elif currency.compare_amounts(debit_sum, credit_sum) == -1:
                acc_id = slip.journal_id.default_debit_account_id.id
                if not acc_id:
                    raise UserError(_('The Expense Journal "%s" has not properly configured the Debit Account!') % (slip.journal_id.name))
                adjust_debit = (0, 0, {
                    'name': _('Adjustment Entry'),
                    'partner_id': False,
                    'account_id': acc_id,
                    'journal_id': slip.journal_id.id,
                    'date': date,
                    'debit': currency.round(credit_sum - debit_sum),
                    'credit': 0.0,
                })
                line_ids.append(adjust_debit)
            move_dict['line_ids'] = line_ids
            move = self.env['account.move'].create(move_dict)
            slip.write({'move_id': move.id, 'date': date})
            move.post()
        return res
class HrSalaryRule(models.Model):
    """Salary rule extended with the accounts used when posting payslips."""
    _inherit = 'hr.salary.rule'
    analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account')
    account_tax_id = fields.Many2one('account.tax', 'Tax')
    # Deprecated accounts are excluded from the selectable domain.
    account_debit = fields.Many2one('account.account', 'Debit Account', domain=[('deprecated', '=', False)])
    account_credit = fields.Many2one('account.account', 'Credit Account', domain=[('deprecated', '=', False)])
class HrContract(models.Model):
    """Contract extended with analytic account and salary journal."""
    _inherit = 'hr.contract'
    _description = 'Employee Contract'
    analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account')
    journal_id = fields.Many2one('account.journal', 'Salary Journal')
class HrPayslipRun(models.Model):
    """Payslip batch extended with the salary journal used for posting."""
    _inherit = 'hr.payslip.run'
    journal_id = fields.Many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True,
        required=True, default=lambda self: self.env['account.journal'].search([('type', '=', 'general')], limit=1))
| [
"50145400+gilbertp7@users.noreply.github.com"
] | 50145400+gilbertp7@users.noreply.github.com |
fdf636afdf8305c4b4a96ecb60581c46eb1ff1c1 | eb9efff3d2996a2640cdf50954a8c13a97b34491 | /ssh_run_wl.py | 2a70f8c5f613e8cff7128edf76b96b51bfef016a | [] | no_license | jagratac/genVT_wlcBench | 3b0b3fde15bb988577d29d83c14b56235f8dbe53 | 5e3d86b0dbd1e76e216f8dd665e6472e76e28a1a | refs/heads/main | 2023-08-27T16:08:18.250486 | 2021-10-25T05:05:38 | 2021-10-25T05:05:38 | 416,367,671 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | import sys
import yaml
import paramiko
def get_VM_info():
    """Read the target VM's connection details from the YAML file named in argv[1].

    Returns a (ip, username, password, command) tuple taken from the
    'ip', 'un', 'pwd' and 'cmd' keys of the file.
    """
    with open(sys.argv[1]) as f:
        # safe_load instead of full_load: a credentials file only needs basic
        # YAML types, and safe_load refuses arbitrary-object construction.
        data = yaml.safe_load(f)
    return data['ip'], data['un'], data['pwd'], data['cmd']
def Create_SSH():
    """Open an SSH connection to the module-level ``host`` as ``un``/``pwd``.

    Returns a connected paramiko.SSHClient, or None when a credential is
    missing or the connection attempt fails.
    """
    if None in [host, un, pwd]:
        print('Missing one of the required credentials')
        return None
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # Addedd to Keys if missing
    ssh.load_system_host_keys()
    try:
        ssh.connect(host, username=un, password=pwd)
        print(f'Connected to {host} as {un}')
    except Exception as e:
        print(f"Error: {e} while connecting to {host} as {un}")
        # Bug fix: the original returned the unconnected client here, so the
        # caller's "ssh is None" failure check never triggered.
        return None
    return ssh
# Entry point: load credentials from the YAML file, open the SSH session,
# then fire the workload command on the remote VM.
host, un, pwd, cmd = get_VM_info()
ssh = Create_SSH()
if ssh is None:
    print('Error in creating connection')
    sys.exit()
print(f"Executing the WL by the command {cmd}")
# NOTE(review): the command is started but its output/exit status is not
# collected and the connection is never closed — confirm this is intended.
ssh.exec_command(cmd)
| [
"jagrat.acharya@intel.com"
] | jagrat.acharya@intel.com |
ed7f7ae9b779ad7805b38ff2e7e05e94049da473 | 520f8bbd2e9dcbdb576d51a0bee3d23b12094d76 | /helloworldapp/urls.py | 0757e9528ae14128e8bab3bafbf9228b4f32ec57 | [] | no_license | KoyanagiAyuha/helloworldproject | c3927831bf1c6ad00756a5fc87593935f267792e | c6e7ad14b8075914ac71f7a8849732c6760ecd46 | refs/heads/master | 2022-11-29T01:30:20.044669 | 2020-08-10T04:57:37 | 2020-08-10T04:57:37 | 286,204,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from django.urls import path
from .views import hellofunction
# Route "world" to the hellofunction view.
urlpatterns = [
    path('world', hellofunction),
] | [
"ayuhakoyanagi@AyuhanoMacBook-Air.local"
] | ayuhakoyanagi@AyuhanoMacBook-Air.local |
d57b71394ab89f33a7b8a6c07d5e138611f899dd | cf2e40054af8a9d8c05b51000649d06ec572853d | /Cryptography/app.py | 127c3aabb0b7dc3f2c2e1e2b2b7a233cd43abdc1 | [] | no_license | Wiejeben/HR-Development-1 | 688ab5d60d310b37302e4a0ac29e0b78d6dc819e | a39a82288f3c1ce635ae0a42d41aef848efc4afe | refs/heads/master | 2021-05-30T01:57:27.857728 | 2015-10-11T10:38:40 | 2015-10-11T10:38:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | message = raw_input("Enter a message: ")
# Python 2 script: input() eval's the entry, so a typed number arrives as int.
offset = input("Encryption offset (number): ")
result = ""
offset %= 26  # Python's % always yields 0..25 here, even for negative offsets
for char in message:
    if char.isalpha():
        # Shift the letter; because offset is non-negative after the %= 26
        # above, only the upper-bound wrap (past 'Z'/'z') can occur — the
        # original's "number < ord('A')/ord('a')" branches were unreachable
        # and have been removed.
        number = ord(char) + offset
        if char.isupper():
            # Uppercase
            if number > ord("Z"):
                number = number - 26
        else:
            # Lowercase
            if number > ord("z"):
                number = number - 26
        char = chr(number)
    result += char
print result | [
"wiejeben@gmail.com"
] | wiejeben@gmail.com |
8692c4889582e9c8f425306d8b5ac70d4ee7090e | e8cb5f716b064043708293f924ed1ba84005e417 | /examples/Redfish/ex09_find_ilo_mac_address.py | d05e42bb446a1e7359fb7eeacd4a665968932786 | [
"Apache-2.0"
] | permissive | injan0913/python-ilorest-library | 9207caeab89038f7e6ae803c55de183bda02edb3 | 8507d96cf7b9604a30ae6548cafc0003d1098b72 | refs/heads/master | 2020-12-24T22:20:13.135325 | 2016-06-23T18:26:33 | 2016-06-23T18:26:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,598 | py | # Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from redfishobject import RedfishObject
from ilorest.rest.v1_helper import ServerDownOrUnreachableError
def ex9_find_ilo_mac_address(redfishobj):
    """Print the MAC address of every iLO NIC (Example 9).

    Walks each Manager resource's EthernetInterfaces collection and writes
    one "name = MAC (state)" line per NIC to stdout; NICs lacking a
    MACAddress property are reported on stderr.  Output text matches the
    original example byte for byte.
    """
    sys.stdout.write("\nEXAMPLE 9: Find iLO's MAC Addresses\n")
    for manager in redfishobj.search_for_type("Manager."):
        manager_data = redfishobj.redfish_get(manager["@odata.id"])
        nic_collection = redfishobj.redfish_get(
            manager_data.dict["EthernetInterfaces"]["@odata.id"])
        for member in nic_collection.dict["Members"]:
            nic = redfishobj.redfish_get(member["@odata.id"])
            if "MACAddress" not in nic.dict:
                sys.stderr.write("\tNIC resource does not contain "
                                 "'MACAddress' property\n")
            else:
                line = ("\t" + nic.dict["Name"] + " = " +
                        nic.dict["MACAddress"] + "\t(" +
                        nic.dict["Status"]["State"] + ")\n")
                sys.stdout.write(line)
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_host = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO address, iLO account name,
# and password to send https requests
iLO_host = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
# Create a REDFISH object
try:
REDFISH_OBJ = RedfishObject(iLO_host, iLO_account, iLO_password)
except ServerDownOrUnreachableError, excp:
sys.stderr.write("ERROR: server not reachable or doesn't support " \
"RedFish.\n")
sys.exit()
except Exception, excp:
raise excp
ex9_find_ilo_mac_address(REDFISH_OBJ)
| [
"jackgarcia77@gmail.com"
] | jackgarcia77@gmail.com |
0683bf8ad5c3c694f51449ee7707083a89cb157e | 75c23ba89772bfce73e6d370c18aefaf70baf4e5 | /bin/easy_install-3.6 | f9533533f76d00f9583ce63cda6dec7388aa4659 | [] | no_license | DanielscSeta/Mestrados_FCUP | 30dd529a9e9235021889ede8b659b599fe7fd75e | ce99367b6624e3445e2458f394e8411204c16bf4 | refs/heads/main | 2023-07-27T21:29:18.481205 | 2021-09-10T16:07:19 | 2021-09-10T16:07:19 | 404,388,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | 6 | #!/home/jx23/Mestrados_FCUP/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
# Auto-generated setuptools console-script shim: strip the "-script.py(w)" /
# ".exe" wrapper suffix from argv[0], then hand control to easy_install.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"joaquim.tiago1999@gmail.com"
] | joaquim.tiago1999@gmail.com |
47fd02cdb905a982497ef7ab2f1f11f846f17513 | 8b0c0da3d066d0f277f3d41c30bda28802f034a1 | /nlcd/local.py | e7cfa69dfc4b15f7d5aea58e450d68bc9a100eb6 | [
"MIT"
] | permissive | LucidAi/nlcd | 95dad7deb20f966222ccbc9d7e0dd62cdca1ea99 | 0cf17211e6272505b20089dc300ef0702645bec8 | HEAD | 2016-08-05T09:06:30.072959 | 2015-02-20T06:38:30 | 2015-02-20T06:38:30 | 18,310,003 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,115 | py | # coding: utf-8
# Author: Vova Zaytsev <zaytsev@usc.edu>
import os
import json
def project_dir(dir_name):
    """Return the path of *dir_name* relative to the project root.

    The root is taken to be the parent of this settings module's directory.
    Backslashes are rewritten to "//" (as in the original) so Windows-style
    separators do not leak into the value.
    """
    settings_home = os.path.dirname(__file__)
    joined = os.path.join(settings_home, "..", dir_name)
    return joined.replace("\\", "//")
# with open(project_dir("conf/dev.json"), "rb") as fp:
# CONF = json.load(fp)
# --- Core Django settings -------------------------------------------------
# NOTE(review): SECRET_KEY committed in source and DEBUG=True are fine for
# local development only — confirm production uses different values.
SECRET_KEY = "h8(e(u3#k)l802(4mfh^f&&jp!@p*s#98tf++l#z-e83(#$x@*"
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ["localhost"]
INSTALLED_APPS = (
    "django.contrib.contenttypes",
    "django.contrib.staticfiles",
    "client"
)
MIDDLEWARE_CLASSES = (
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
)
ROOT_URLCONF = "nlcd.urls"
WSGI_APPLICATION = "nlcd.wsgi.application"
# SQLite database stored at the project root.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": project_dir("nlcd.db"),
    }
}
# --- Internationalization -------------------------------------------------
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# --- Static files ---------------------------------------------------------
STATIC_ROOT = "/webapp/"
STATIC_URL = "/webapp/"
STATICFILES_DIRS = (project_dir("webapp"),)
STATICFILES_FINDERS = (
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
# --- Templates ------------------------------------------------------------
TEMPLATE_LOADERS = (
    "django.template.loaders.filesystem.Loader",
    "django.template.loaders.app_directories.Loader",
)
TEMPLATE_DIRS = (
    "webapp/templates",
)
# --- Logging: everything at DEBUG to log.txt ------------------------------
LOGGING = {
    "version": 1,
    "handlers": {
        "logfile": {
            "class": "logging.handlers.WatchedFileHandler",
            "filename": "log.txt"
        },
    },
    "loggers": {
        "django": {
            "handlers": ["logfile"],
            "level": "DEBUG",
            "propagate": False,
        },
        "nlcd": {
            "handlers": ["logfile"],
            "level": "DEBUG",
            "propagate": False
        },
    },
}
FILE_UPLOAD_MAX_MEMORY_SIZE = 32 * 1024 * 1024 | [
"zaytsev@usc.edu"
] | zaytsev@usc.edu |
19041833fbb00349ef3cb22e3d4012571f7a05d5 | 633701ef4d039d2cd0d4409bd8ad765b748f1b96 | /ZuheGesdatos/src/GeneraConsulta/Title.py | a9ed747f9776c39769e601dca4aa2371d7973735 | [] | no_license | wahello/gesdatos | 4c991536f3265bf937ad117ed0c9c9b913182db5 | b7fa1939056baa01b48d310d981a5fb1493d6698 | refs/heads/master | 2020-03-11T12:25:37.275071 | 2015-12-14T04:25:35 | 2015-12-14T04:25:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import wx
class Title1(wx.Panel,):
    """Blue title banner for the selection/functions screen."""
    def __init__(self, parent,*args, **kwds):
        wx.Panel.__init__(self, parent,size=(100,100))
        self.SetBackgroundColour("3399FF")
        # Label text (including its original casing) is shown verbatim in
        # the UI, so it is left untouched.
        self.text = wx.StaticText(self, label="SElECCIÓN y FUNCIONES", pos=(0, 0))
        self.font = wx.Font(18, wx.DECORATIVE, wx.ITALIC, wx.NORMAL)
        self.text.SetFont(self.font)
class Title2(wx.Panel,):
    """Blue title banner for the JOIN-operation screen."""
    def __init__(self, parent,*args, **kwds):
        wx.Panel.__init__(self, parent,size=(100,100))
        self.SetBackgroundColour("3399FF")
        self.text = wx.StaticText(self, label="OPERACIÓN JOIN", pos=(0, 0))
        self.font = wx.Font(18, wx.DECORATIVE, wx.ITALIC, wx.NORMAL)
        self.text.SetFont(self.font)
class Title3(wx.Panel,):
    """Blue title banner for the WHERE-condition screen (default panel size)."""
    def __init__(self, parent,*args, **kwds):
        wx.Panel.__init__(self, parent)
        self.SetBackgroundColour("3399FF")
        self.text = wx.StaticText(self, label="CONDICIÓN (WHERE)", pos=(0, 0))
        self.font = wx.Font(18, wx.DECORATIVE, wx.ITALIC, wx.NORMAL)
        self.text.SetFont(self.font)
class Title4(wx.Panel,):
    """Blue title banner for the compiler screen (default panel size)."""
    def __init__(self, parent,*args, **kwds):
        wx.Panel.__init__(self, parent)
        self.SetBackgroundColour("3399FF")
        self.text = wx.StaticText(self, label="COMPILADOR", pos=(0, 0))
        self.font = wx.Font(18, wx.DECORATIVE, wx.ITALIC, wx.NORMAL)
        self.text.SetFont(self.font)
| [
"FABIO@FABIO-HP"
] | FABIO@FABIO-HP |
5e050b7544ba1fe410e841ccdc2c2724523bf8b8 | 306dd26becd8217d0d4f86e8c36e8fb9dfc069c7 | /Deep-Learning-master/Pytorch_/2_nn_module/2_nn_pytorch_cuda.py | d70c96dc4638e0784f477a3864f5c9b3075af5e8 | [] | no_license | fnasiri/data_science_lessons | cdded42481e697725841251100ca7999febfba6b | 382c0b5e46b355938d1791d39dcfd8170be14274 | refs/heads/master | 2020-03-24T11:27:44.449995 | 2018-07-28T15:03:56 | 2018-07-28T15:03:56 | 142,685,930 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | #----------------------------------------------------------------------------
import torch
from torch.autograd import Variable
#----------------------------------------------------------------------------
# Two-layer MLP trained on random data with manual SGD, entirely on the GPU.
# NOTE(review): written for a pre-0.4 PyTorch — Variable is deprecated,
# MSELoss(size_average=...) was replaced by reduction=, and loss.data[0]
# fails on modern releases (use loss.item()).  Left as-is for the old API.
Batch_size = 64 # Batch size
R = 1000 # Input size
S = 100 # Number of neurons
a_size = 10 # Network output size
#----------------------------------------------------------------------------
p = Variable(torch.randn(Batch_size, R).cuda())
t = Variable(torch.randn(Batch_size, a_size).cuda(), requires_grad=False)
model = torch.nn.Sequential(
    torch.nn.Linear(R, S),
    torch.nn.ReLU(),
    torch.nn.Linear(S, a_size),
)
model.cuda()
performance_index = torch.nn.MSELoss(size_average=False)
learning_rate = 1e-4
for index in range(500):
    # Forward pass, loss, backprop, then a manual SGD parameter update.
    a = model(p)
    loss = performance_index(a, t)
    print(index, loss.data[0])
    model.zero_grad()
    loss.backward()
    for param in model.parameters():
        param.data -= learning_rate * param.grad.data
"farshad.nasiri@ga.co"
] | farshad.nasiri@ga.co |
b7abb0877ecfc6bc8c64ef9fc8b253984d33a132 | 273b6fac2e8d7f6492d3eba8de2e4e18f2931798 | /ceph_deploy/lib/vendor/remoto/lib/vendor/execnet/gateway.py | 244cfe4d412109549d77aad753735aa38ef34489 | [
"MIT"
] | permissive | sheepcat/ceph-deploy | 9e399476b8cac0f48328054189476072ead139a8 | 7e26b0311a733927b9750a84ba500c090be60cbd | refs/heads/master | 2021-02-21T06:48:13.723727 | 2018-09-19T13:02:55 | 2018-09-19T13:02:55 | 245,352,776 | 1 | 0 | MIT | 2020-03-06T07:09:36 | 2020-03-06T07:09:35 | null | UTF-8 | Python | false | false | 7,074 | py | """
gateway code for initiating popen, socket and ssh connections.
(c) 2004-2013, Holger Krekel and others
"""
import sys, os, inspect, types, linecache
import textwrap
import execnet
from execnet.gateway_base import Message
from execnet import gateway_base
importdir = os.path.dirname(os.path.dirname(execnet.__file__))
class Gateway(gateway_base.BaseGateway):
    """ Gateway to a local or remote Python Interpreter. """
    def __init__(self, io, spec):
        super(Gateway, self).__init__(io=io, id=spec.id, _startcount=1)
        self.spec = spec
        self._initreceive()
    @property
    def remoteaddress(self):
        return self._io.remoteaddress
    def __repr__(self):
        """ return string representing gateway type and status. """
        try:
            # py2-compatible "cond and a or b" conditional expression.
            r = (self.hasreceiver() and 'receive-live' or 'not-receiving')
            i = len(self._channelfactory.channels())
        except AttributeError:
            # Attributes are missing until base-class initialization finishes.
            r = "uninitialized"
            i = "no"
        return "<%s id=%r %s, %s model, %s active channels>" %(
            self.__class__.__name__, self.id, r, self.execmodel.backend, i)
    def exit(self):
        """ trigger gateway exit. Defer waiting for finishing
        of receiver-thread and subprocess activity to when
        group.terminate() is called.
        """
        self._trace("gateway.exit() called")
        if self not in self._group:
            self._trace("gateway already unregistered with group")
            return
        self._group._unregister(self)
        try:
            self._trace("--> sending GATEWAY_TERMINATE")
            self._send(Message.GATEWAY_TERMINATE)
            self._trace("--> io.close_write")
            self._io.close_write()
        except (ValueError, EOFError, IOError):
            # sys.exc_info() instead of "as e": py2/py3-compatible capture.
            v = sys.exc_info()[1]
            self._trace("io-error: could not send termination sequence")
            self._trace(" exception: %r" % v)
    def reconfigure(self, py2str_as_py3str=True, py3str_as_py2str=False):
        """
        set the string coercion for this gateway
        the default is to try to convert py2 str as py3 str,
        but not to try and convert py3 str to py2 str
        """
        self._strconfig = (py2str_as_py3str, py3str_as_py2str)
        data = gateway_base.dumps_internal(self._strconfig)
        self._send(Message.RECONFIGURE, data=data)
    def _rinfo(self, update=False):
        """ return some sys/env information from remote. """
        # Cached after the first call; pass update=True to refresh.
        if update or not hasattr(self, '_cache_rinfo'):
            ch = self.remote_exec(rinfo_source)
            self._cache_rinfo = RInfo(ch.receive())
        return self._cache_rinfo
    def hasreceiver(self):
        """ return True if gateway is able to receive data. """
        return self._receivepool.active_count() > 0
    def remote_status(self):
        """ return information object about remote execution status. """
        channel = self.newchannel()
        self._send(Message.STATUS, channel.id)
        statusdict = channel.receive()
        # the other side didn't actually instantiate a channel
        # so we just delete the internal id/channel mapping
        self._channelfactory._local_close(channel.id)
        return RemoteStatus(statusdict)
    def remote_exec(self, source, **kwargs):
        """ return channel object and connect it to a remote
        execution thread where the given ``source`` executes.
        * ``source`` is a string: execute source string remotely
          with a ``channel`` put into the global namespace.
        * ``source`` is a pure function: serialize source and
          call function with ``**kwargs``, adding a
          ``channel`` object to the keyword arguments.
        * ``source`` is a pure module: execute source of module
          with a ``channel`` in its global namespace
        In all cases the binding ``__name__='__channelexec__'``
        will be available in the global namespace of the remotely
        executing code.
        """
        call_name = None
        if isinstance(source, types.ModuleType):
            linecache.updatecache(inspect.getsourcefile(source))
            source = inspect.getsource(source)
        elif isinstance(source, types.FunctionType):
            call_name = source.__name__
            source = _source_of_function(source)
        else:
            source = textwrap.dedent(str(source))
        # kwargs are only meaningful when a function is being called remotely.
        if call_name is None and kwargs:
            raise TypeError("can't pass kwargs to non-function remote_exec")
        channel = self.newchannel()
        self._send(Message.CHANNEL_EXEC,
            channel.id,
            gateway_base.dumps_internal((source, call_name, kwargs)))
        return channel
    def remote_init_threads(self, num=None):
        """ DEPRECATED. Is currently a NO-OPERATION already."""
        print ("WARNING: remote_init_threads() is a no-operation in execnet-1.2")
class RInfo:
    """Attribute-style view over a dict of remote interpreter facts."""
    def __init__(self, kwargs):
        # Expose every key of the mapping as an instance attribute.
        self.__dict__.update(kwargs)
    def __repr__(self):
        pairs = ["{0}={1}".format(key, value)
                 for key, value in self.__dict__.items()]
        return "<RInfo %r>" % ", ".join(pairs)
RemoteStatus = RInfo
def rinfo_source(channel):
    """Executed on the remote side: send basic interpreter/env facts back."""
    import sys, os
    info = {
        "executable": sys.executable,
        "version_info": sys.version_info[:5],
        "platform": sys.platform,
        "cwd": os.getcwd(),
        "pid": os.getpid(),
    }
    channel.send(info)
def _find_non_builtin_globals(source, codeobj):
try:
import ast
except ImportError:
return None
try:
import __builtin__
except ImportError:
import builtins as __builtin__
vars = dict.fromkeys(codeobj.co_varnames)
all = []
for node in ast.walk(ast.parse(source)):
if (isinstance(node, ast.Name) and node.id not in vars and
node.id not in __builtin__.__dict__):
all.append(node.id)
return all
def _source_of_function(function):
    """Return the dedented source of *function* for remote execution,
    rejecting lambdas, closures, non-``channel`` first arguments and
    functions that reference non-builtin globals."""
    if function.__name__ == '<lambda>':
        raise ValueError("can't evaluate lambda functions'")
    #XXX: we don't check before remote instantiation
    # if arguments are used properly
    # NOTE(review): inspect.getargspec was removed in Python 3.11 — this
    # vendored code predates that; confirm the supported interpreter range.
    args, varargs, keywords, defaults = inspect.getargspec(function)
    if args[0] != 'channel':
        raise ValueError('expected first function argument to be `channel`')
    # Pick the code/closure attributes by interpreter major version.
    if sys.version_info < (3,0):
        closure = function.func_closure
        codeobj = function.func_code
    else:
        closure = function.__closure__
        codeobj = function.__code__
    if closure is not None:
        raise ValueError("functions with closures can't be passed")
    try:
        source = inspect.getsource(function)
    except IOError:
        raise ValueError("can't find source file for %s" % function)
    source = textwrap.dedent(source) # just for inner functions
    used_globals = _find_non_builtin_globals(source, codeobj)
    if used_globals:
        raise ValueError(
            "the use of non-builtin globals isn't supported",
            used_globals,
        )
    return source
| [
"1728642273@qq.com"
] | 1728642273@qq.com |
08dde7520a5cc6318c6ea6a3daea7417cf1e7d49 | 8050168c08d5bb26f0da6784ca3d536950d43810 | /activity/migrations/0009_auto_20190305_1508.py | b246aa4d037ae5a5422101d5dacb29818286457f | [] | no_license | qoutland/docent | 043f945d8a3016fdc54ee113a108a608e58456dc | f4dffaa3b72d922dfb99e40e7f73155ad25a2509 | refs/heads/master | 2022-12-15T00:13:24.940849 | 2019-05-02T17:55:43 | 2019-05-02T17:55:43 | 164,701,946 | 1 | 0 | null | 2022-11-22T03:31:43 | 2019-01-08T17:41:28 | Python | UTF-8 | Python | false | false | 386 | py | # Generated by Django 2.1.3 on 2019-03-05 23:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make activity.pic_url optional (blank allowed)."""
    dependencies = [
        ('activity', '0008_auto_20190305_1507'),
    ]
    operations = [
        migrations.AlterField(
            model_name='activity',
            name='pic_url',
            field=models.URLField(blank=True),
        ),
    ]
| [
"="
] | = |
af3293648faee313fc72fa54dbfcff027235b08b | a47427997bd767888763ed7ea0446d214da72513 | /todolist/todo/forms.py | c68bdcd768ba8838dd1f4f2388ed5b388cc2d2b6 | [] | no_license | davshibru/todo-form | dfd8fb091632c98b0a9e66178f7cf16e20d71834 | c05a22074e36df22ba90ad147eb5df033070e04e | refs/heads/master | 2020-05-04T16:55:00.456214 | 2019-04-03T13:15:02 | 2019-04-03T13:15:02 | 179,291,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from django import forms
from .models import To
class ToForm(forms.ModelForm):
    """ModelForm for creating/editing ``To`` items."""

    class Meta:
        # Derive form fields from the To model; expose only these two fields.
        model = To
        fields = ('todo', 'action',)
"noreply@github.com"
] | noreply@github.com |
fac7a4a64b214d06b499eece4d7bdc6d4ceb1240 | 2d557cb608984e1978132be8acdb41b96f3bfcf9 | /polls/migrations/0001_initial.py | 3b78c1d0e9db1fba2f7bf8b4a69c745c4760d361 | [] | no_license | apoorv1997/Django | 58756b4207884e96b408eeb31fda4ff77640c632 | 6884c282e5012769ca2ea11424741b928f8bcf2e | refs/heads/master | 2021-01-15T11:29:54.122023 | 2017-08-08T09:00:19 | 2017-08-08T09:00:19 | 99,620,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-07 14:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the polls app (Question and Choice)."""

    initial = True

    dependencies = [
    ]

    operations = [
        # A selectable answer with a running vote tally.
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        # A poll question with its publication timestamp.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        # Link each Choice to its Question; deleting a Question removes its Choices.
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
        ),
    ]
| [
"apoorv.b15@iiits.in"
] | apoorv.b15@iiits.in |
78b4cd946d9b8904968af0f9c9b27cfba1edf1ae | 0e147f35dcc10ab063074b3a63beb575553cfa5d | /djangboots/urls.py | 4a246cc69912352a85c79cd2b1ff6f49fce104d4 | [] | no_license | joyonto51/djangoboots | f972fe75ceb6c349f4bb21d382e646cd5bbe6d6f | 0dcccda0524938431239a750878a3e0745821dae | refs/heads/master | 2021-09-23T03:53:22.281560 | 2019-12-23T16:16:30 | 2019-12-23T16:16:30 | 229,782,158 | 0 | 0 | null | 2021-09-22T18:17:48 | 2019-12-23T15:55:03 | HTML | UTF-8 | Python | false | false | 915 | py | """djangboots URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
# URL routing table: Django admin plus everything from the sample_crud app
# mounted at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('sample_crud.urls')),
    # path('', TemplateView.as_view(template_name='base.html')),
]
| [
"joyonto51@gmail.com"
] | joyonto51@gmail.com |
3173fdd72b9488c31024b627d504418b381ca1a4 | d22c766670843e66a4d3b2c808090fccb58674d2 | /python_int/fib.py | 959dfa950fe3e5a8790f57254b7ab4b4c46a34b5 | [] | no_license | gnuwho/test_bootstrap | b6c6554848932c35783eee147f0b60b5afe72121 | bee3f02a9cf162ae346f1b3ece1ed5793bf220ed | refs/heads/master | 2021-01-11T16:31:16.401322 | 2017-02-11T04:10:09 | 2017-02-11T04:10:09 | 80,097,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | def F(n):
if n == 0: return 0
elif n == 1: return 1
else: return F(n-1)+F(n-2)
for i in xrange(2000):
print F(i)
| [
"gnuwho@gmail.com"
] | gnuwho@gmail.com |
fe767d258b2e1863022c3822a5dc6cb9bef4067c | 4100d382fd6434846e00440e26bc216a6e4340bc | /spotify/v1/artist/__init__.py | 868c1903c3f1a009ee7cd3cd3c93cbb4ac0563f6 | [
"MIT"
] | permissive | jingming/spotify | 1aa96ffc90dde8d26c5ce93cac8422d4cd61fd39 | d92c71073b2515f3c850604114133a7d2022d1a4 | refs/heads/master | 2021-09-21T21:50:09.109613 | 2018-01-30T05:03:56 | 2018-01-31T00:32:47 | 118,386,962 | 2 | 1 | MIT | 2018-08-31T22:18:29 | 2018-01-22T00:35:19 | Python | UTF-8 | Python | false | false | 2,886 | py | from spotify.object.followers import Followers
from spotify.object.image import Image
from spotify.page import Page
from spotify.resource import Resource, UpgradableInstance
from spotify.v1.artist.album import AlbumList
from spotify.v1.artist.related_artist import RelatedArtistList
from spotify.v1.artist.top_track import TopTrackList
class ArtistContext(Resource):
    """Lazy accessor for one artist's sub-resources, keyed by Spotify id."""

    def __init__(self, version, id):
        super(ArtistContext, self).__init__(version)
        self.id = id

        # Sub-resource lists are built lazily on first property access.
        self._albums = None
        self._top_tracks = None
        self._related_artists = None

    @property
    def albums(self):
        if not self._albums:
            self._albums = AlbumList(self.version, self.id)
        return self._albums

    @property
    def top_tracks(self):
        if not self._top_tracks:
            self._top_tracks = TopTrackList(self.version, self.id)
        # BUG FIX: the original returned ``self.top_tracks`` (the property
        # itself), which recurses forever; return the cached list instead.
        return self._top_tracks

    @property
    def related_artists(self):
        if not self._related_artists:
            self._related_artists = RelatedArtistList(self.version, self.id)
        return self._related_artists

    def fetch(self):
        """GET /artists/{id} and wrap the JSON payload in an ArtistInstance."""
        response = self.version.request('GET', '/artists/{}'.format(self.id))
        return ArtistInstance(self.version, response.json())
class ArtistInstance(UpgradableInstance):
    """Wraps a full artist JSON payload, exposing its fields as properties."""

    def __init__(self, version, properties):
        super(ArtistInstance, self).__init__(version, properties)
        # Album/top-track/related-artist access is delegated to a context.
        self._context = ArtistContext(self.version, self.id)

    @property
    def external_urls(self):
        return self.property('external_urls')

    @property
    def followers(self):
        """Follower data parsed into a Followers object."""
        return Followers.from_json(self.property('followers'))

    @property
    def genres(self):
        return self.property('genres')

    @property
    def id(self):
        return self.property('id')

    @property
    def images(self):
        """Artwork images in the sizes supplied by the API."""
        return list(map(Image.from_json, self.property('images')))

    @property
    def name(self):
        return self.property('name')

    @property
    def popularity(self):
        return self.property('popularity')

    @property
    def type(self):
        return self.property('type')

    @property
    def uri(self):
        return self.property('uri')

    @property
    def albums(self):
        return self._context.albums

    @property
    def top_tracks(self):
        return self._context.top_tracks

    @property
    def related_artists(self):
        return self._context.related_artists
class ArtistList(Resource):
    """Entry point for artist lookups: single lazy context or bulk fetch."""

    def get(self, id):
        """Return a lazy context for one artist id (no request issued yet)."""
        return ArtistContext(self.version, id)

    def list(self, ids):
        """GET /artists for several ids at once and page over the result."""
        params = {'ids': ','.join(ids)}
        response = self.version.request('GET', '/artists', params=params)
        return ArtistPage(self.version, response.json(), 'artists')
class ArtistPage(Page):
    """Page of artist results."""

    # Tells the generic Page machinery which wrapper class to build per item.
    @property
    def instance_class(self):
        return ArtistInstance
| [
"niu@jingming.ca"
] | niu@jingming.ca |
2656d3ab378b0588a8fdd7fde8e234e1c5d60d45 | 69208a0ff78088f0fccace4ad0f252af83e37d6b | /controller/page_assertion_controller.py | d8195d650d4160cc054cbddf11408f51ce1ad95f | [] | no_license | chesterqian/uitesting | 644147c5c052e06f205d203e4d8b2c0815116efd | a860fd48d6137932f12f9128022b877f5cfdd596 | refs/heads/master | 2022-12-20T10:45:59.219195 | 2020-09-29T05:11:37 | 2020-09-29T05:11:37 | 299,259,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,172 | py | '''
Created on Otc 31, 2013
@author: Jamous.Fu
'''
from locator.course_page_xpath import *
from common.global_config import Global
from common.logger import print_debug_info
from controller.basic_controller import BasicController
class PageAssertionController(BasicController):
    """Assertion-keyword facade over ``self.page_assertion_obj``.

    Every method logs a debug trace and then delegates to the underlying
    page-assertion object; the trailing boolean argument selects a positive
    vs. negative expectation.  ``page_assertion_obj`` is presumably created
    by BasicController — TODO confirm against basic_controller.py.
    """

    def destory_page_assertion_object(self):
        # NOTE(review): "destory" is a typo for "destroy", but renaming would
        # break existing keyword callers.
        self.page_assertion_obj = None

    # --- generic element / text / URL checkpoints ---

    def element_should_be_existing(self, xpath="\\"):
        print_debug_info("Calling [element_should_be_existing].")
        return self.page_assertion_obj.is_existing(xpath, True)

    def element_should_not_be_existing(self, xpath="\\"):
        print_debug_info("Calling [element_should_not_be_existing].")
        return self.page_assertion_obj.is_existing(xpath, False)

    def elements_should_be_existing_with_expected_number(self, xpath="\\", expected_number=15):
        # 15,this default number, is the maximum number of words can be displayed on one page.
        print_debug_info("Calling[elements_should_be_existing_with_expected_number].")
        return self.page_assertion_obj.check_element_count(xpath, expected_number)

    def collect_checkpoint_error_results(self):
        print_debug_info("Calling [collect_checkpoint_error_results].")
        self.page_assertion_obj.collect_checkpoint_results()

    def text_should_be_existing(self, text):
        print_debug_info("Calling [text_should_be_existing].")
        return self.page_assertion_obj.is_text_existing(text, True)

    def text_should_not_be_existing(self, text):
        print_debug_info("Calling [text_should_not_be_existing].")
        return self.page_assertion_obj.is_text_existing(text, False)

    def current_url_should_end_with(self, partial_url):
        print_debug_info("Calling [current_url_should_end_with].")
        return self.page_assertion_obj.is_on_right_page(partial_url, True, 'endwith')

    def current_url_should_contains(self, partial_url):
        print_debug_info("Calling [current_url_should_contains].")
        return self.page_assertion_obj.is_on_right_page(partial_url, True)

    def current_url_should_not_contain(self, partial_url):
        print_debug_info("Calling [current_url_should_not_contain].")
        return self.page_assertion_obj.is_on_right_page(partial_url, False)

    def element_should_be_displayed(self, xpath="\\"):
        print_debug_info("Calling [element_should_be_displayed].")
        return self.page_assertion_obj.element_is_displayed(xpath, True)

    def element_should_not_be_displayed(self, xpath="\\"):
        print_debug_info("Calling [element_should_not_be_displayed].")
        return self.page_assertion_obj.element_is_displayed(xpath, False)

    def element_should_be_enabled(self, xpath="\\"):
        print_debug_info("Calling [element_should_be_enabled].")
        return self.page_assertion_obj.element_is_enabled(xpath, True)

    def element_should_be_disabled(self, xpath="\\"):
        print_debug_info("Calling [element_should_be_disabled].")
        return self.page_assertion_obj.element_is_enabled(xpath, False)

    def innertext_of_element_should_be(self, xpath="\\", text=""):
        print_debug_info("Calling [innertext_of_element_should_be].")
        return self.page_assertion_obj.element_is_displayed_with_text(xpath, text, True)

    def innertext_of_element_should_contain(self, xpath="\\", text=""):
        print_debug_info("Calling [innertext_of_element_should_contain].")
        return self.page_assertion_obj.element_is_displayed_with_text(xpath, text, False)

    def element_should_have_css_class_name(self, xpath="\\", css_class_name=""):
        print_debug_info("Calling [element_should_have_css_class_name].")
        return self.page_assertion_obj.element_has_css_class_name(xpath, css_class_name, True)

    def element_should_not_have_css_class_name(self, xpath="\\", css_class_name=""):
        print_debug_info("Calling [element_should_not_have_css_class_name].")
        return self.page_assertion_obj.element_has_css_class_name(xpath, css_class_name, False)

    # --- activity navigator / epaper checkpoints ---

    def current_activity_navigation_status_should_be_passed(self):
        print_debug_info("Calling [current_activity_navigation_status_should_be_passed].")
        return self.page_assertion_obj.check_activity_navigator_status(True)

    def current_activity_navigation_status_should_be_normal(self):
        print_debug_info("Calling [current_activity_navigation_status_should_be_normal].")
        return self.page_assertion_obj.check_activity_navigator_status(False)

    def epaper_should_be_expanded(self):
        print_debug_info("Calling [epaper_should_be_expanded].")
        return self.page_assertion_obj.check_epaper_is_expanded(True)

    def epaper_should_not_be_expanded(self):
        print_debug_info("Calling [epaper_should_not_be_expanded].")
        return self.page_assertion_obj.check_epaper_is_expanded(False)

    # --- blurb checkpoints ---

    def element_should_have_blurb_id(self, xpath="\\", blurb_id=""):
        print_debug_info("Calling [element_should_have_blurb_id].")
        return self.page_assertion_obj.check_element_blurb_id(xpath, blurb_id, True)

    def element_should_not_have_blurb_id(self, xpath="\\", blurb_id=""):
        print_debug_info("Calling [element_should_not_have_blurb_id].")
        return self.page_assertion_obj.check_element_blurb_id(xpath, blurb_id, False)

    def element_should_be_displayed_same_value_as_blurb_id(self, xpath="\\", blurb_id=""):
        print_debug_info("Calling [element_should_be_displayed_same_value_as_blurb_id].")
        return self.page_assertion_obj.check_element_text_by_blurb_id(xpath, blurb_id, True)

    def element_should_not_be_displayed_same_value_as_blurb_id(self, xpath="\\", blurb_id=""):
        print_debug_info("Calling [element_should_not_be_displayed_same_value_as_blurb_id].")
        return self.page_assertion_obj.check_element_text_by_blurb_id(xpath, blurb_id, False)

    def node_should_be_passed(self, course_query_string):
        print_debug_info("Calling [node_should_be_passed].")
        return self.page_assertion_obj.check_specific_node_is_passed(course_query_string, True)

    def node_should_not_be_passed(self, course_query_string):
        print_debug_info("Calling [node_should_not_be_passed].")
        return self.page_assertion_obj.check_specific_node_is_passed(course_query_string, False)

    # --- activity-container / step-summary navigator indexer checkpoints ---

    def activity_container_normal_navigator_indexer_should_be(self, expected_indexer_string):
        print_debug_info("Calling [activity_container_normal_navigator_indexer_should_be].")
        return self.page_assertion_obj.check_activity_navigator_status_by_page_type( \
            Global.PageType.ACTIVITY_CONTAINER_PAGE, 'normal', expected_indexer_string, True)

    def activity_container_normal_navigator_indexer_should_not_be(self, expected_indexer_string):
        print_debug_info("Calling [activity_container_normal_navigator_indexer_should_not_be].")
        return self.page_assertion_obj.check_activity_navigator_status_by_page_type( \
            Global.PageType.ACTIVITY_CONTAINER_PAGE, 'normal', expected_indexer_string, False)

    def activity_container_passed_navigator_indexer_should_be(self, expected_indexer_string):
        print_debug_info("Calling [activity_container_passed_navigator_indexer_should_be].")
        return self.page_assertion_obj.check_activity_navigator_status_by_page_type( \
            Global.PageType.ACTIVITY_CONTAINER_PAGE, 'passed', expected_indexer_string, True)

    def activity_container_passed_navigator_indexer_should_not_be(self, expected_indexer_string):
        print_debug_info("Calling [activity_container_passed_navigator_indexer_should_not_be].")
        return self.page_assertion_obj.check_activity_navigator_status_by_page_type( \
            Global.PageType.ACTIVITY_CONTAINER_PAGE, 'passed', expected_indexer_string, False)

    def activity_container_summary_navigator_should_be_passed(self):
        print_debug_info("Calling [activity_container_summary_navigator_should_be_pass].")
        return self.page_assertion_obj.check_activity_container_summary_navigator_status(True)

    def activity_container_summary_navigator_should_be_normal(self):
        print_debug_info("Calling [activity_container_summary_navigator_should_not_be_pass].")
        return self.page_assertion_obj.check_activity_container_summary_navigator_status(False)

    def step_summary_normal_navigator_indexer_should_be(self, expected_indexer_string):
        print_debug_info("Calling [step_summary_normal_navigator_indexer_should_be].")
        return self.page_assertion_obj.check_activity_navigator_status_by_page_type( \
            Global.PageType.STEP_SUMMARY_PAGE, 'normal', expected_indexer_string, True)

    def step_summary_normal_navigator_indexer_should_not_be(self, expected_indexer_string):
        print_debug_info("Calling [step_summary_normal_navigator_indexer_should_not_be].")
        return self.page_assertion_obj.check_activity_navigator_status_by_page_type( \
            Global.PageType.STEP_SUMMARY_PAGE, 'normal', expected_indexer_string, False)

    def step_summary_passed_navigator_indexer_should_be(self, expected_indexer_string):
        print_debug_info("Calling [step_summary_passed_navigator_indexer_should_be].")
        return self.page_assertion_obj.check_activity_navigator_status_by_page_type( \
            Global.PageType.STEP_SUMMARY_PAGE, 'passed', expected_indexer_string, True)

    def step_summary_passed_navigator_indexer_should_not_be(self, expected_indexer_string):
        print_debug_info("Calling [step_summary_passed_navigator_indexer_should_not_be].")
        return self.page_assertion_obj.check_activity_navigator_status_by_page_type( \
            Global.PageType.STEP_SUMMARY_PAGE, 'passed', expected_indexer_string, False)

    # --- lesson step checkpoints ---

    def lesson_step_should_be_passed(self, step_index):
        print_debug_info("Calling [lesson_step_should_be_passed].")
        return self.page_assertion_obj.check_lesson_step_status(step_index, 'passed')

    def lesson_step_should_be_perfect(self, step_index):
        print_debug_info("Calling [lesson_step_should_be_perfect].")
        return self.page_assertion_obj.check_lesson_step_status(step_index, 'perfect')

    def lesson_step_should_be_normal(self, step_index):
        print_debug_info("Calling [lesson_step_should_be_normal].")
        return self.page_assertion_obj.check_lesson_step_status(step_index, 'normal')

    def lesson_step_index_should_be(self, step_index, index_text):
        print_debug_info("Calling [lesson_step_index_should_be].")
        return self.page_assertion_obj.element_is_displayed_with_text( \
            STEP_ITEM_INDEX_PATTERN % step_index, index_text, True)

    def lesson_step_category_should_be(self, step_index, category_text):
        print_debug_info("Calling [lesson_step_category_should_be].")
        return self.page_assertion_obj.element_is_displayed_with_text( \
            STEP_ITEM_CATEGORY_PATTERN % step_index, category_text, True)

    def lesson_step_title_should_be(self, step_index, title_text):
        print_debug_info("Calling [lesson_step_title_should_be].")
        return self.page_assertion_obj.element_is_displayed_with_text( \
            STEP_ITEM_TITLE_PATTERN % step_index, title_text, True)

    def lesson_step_should_have_start_button(self, step_index):
        print_debug_info("Calling [lesson_step_should_have_start_button].")
        return self.page_assertion_obj.element_is_displayed( \
            STEP_ITEM_START_BUTTON_PATTERN % step_index, True)

    def lesson_step_should_not_have_start_button(self, step_index):
        print_debug_info("Calling [lesson_step_should_not_have_start_button].")
        return self.page_assertion_obj.element_is_displayed( \
            STEP_ITEM_START_BUTTON_PATTERN % step_index, False)

    def lesson_step_should_have_continue_button(self, step_index):
        print_debug_info("Calling [lesson_step_should_have_continue_button].")
        return self.page_assertion_obj.element_is_displayed( \
            STEP_ITEM_CONTINUE_BUTTON_PATTERN % step_index, True)

    def lesson_step_should_not_have_continue_button(self, step_index):
        print_debug_info("Calling [lesson_step_should_not_have_continue_button].")
        return self.page_assertion_obj.element_is_displayed( \
            STEP_ITEM_CONTINUE_BUTTON_PATTERN % step_index, False)

    # --- unit / lesson overview checkpoints ---

    def unit_lesson_title_should_be(self, lesson_index, lesson_title_text):
        print_debug_info("Calling [unit_lesson_title_should_be].")
        return self.page_assertion_obj.check_lesson_attribute_value(lesson_index, 'title', lesson_title_text, True)

    def unit_lesson_score_should_display(self, lesson_index, score_value):
        print_debug_info("Calling [unit_lesson_score_should_display].")
        return self.page_assertion_obj.check_lesson_attribute_value(lesson_index, 'lesson_score', score_value, True)

    def unit_lesson_score_should_not_display(self, lesson_index, score_value):
        print_debug_info("Calling [unit_lesson_score_should_not_display].")
        return self.page_assertion_obj.check_lesson_attribute_value(lesson_index, 'lesson_score', score_value, False)

    def unit_lesson_status_should_be_normal(self, lesson_index):
        print_debug_info("Calling [unit_lesson_status_should_be_normal].")
        return self.page_assertion_obj.check_lesson_attribute_value(lesson_index, 'status', 'normal', True)

    def unit_lesson_status_should_be_passed(self, lesson_index):
        print_debug_info("Calling [unit_lesson_status_should_be_passed].")
        return self.page_assertion_obj.check_lesson_attribute_value(lesson_index, 'status', 'passed', True)

    def unit_lesson_status_should_be_locked(self, lesson_index):
        print_debug_info("Calling [unit_lesson_status_should_be_locked].")
        return self.page_assertion_obj.check_lesson_attribute_value(lesson_index, 'status', 'locked', True)

    def unit_lesson_status_should_not_be_locked(self, lesson_index):
        print_debug_info("Calling [unit_lesson_status_should_not_be_locked].")
        return self.page_assertion_obj.check_lesson_attribute_value(lesson_index, 'status', 'locked', False)

    def active_unit_dots_string_should_be(self, active_unit_dots_string):
        print_debug_info("Calling [active_unit_dots_string_should_be].")
        return self.page_assertion_obj.check_unit_dots_status('active', active_unit_dots_string)

    def inactive_unit_dots_string_should_be(self, inactive_unit_dots_string):
        print_debug_info("Calling [inactive_unit_dots_string_should_be].")
        return self.page_assertion_obj.check_unit_dots_status('inactive', inactive_unit_dots_string)

    def active_lesson_block_string_should_be(self, active_lesson_block_string=''):
        print_debug_info("Calling [active_lesson_block_string_should_be].")
        return self.page_assertion_obj.check_lesson_block_status('active', active_lesson_block_string)

    def inactive_lesson_block_string_should_be(self, inactive_lesson_block_string):
        print_debug_info("Calling [inactive_lesson_block_string_should_be].")
        return self.page_assertion_obj.check_lesson_block_status('inactive', inactive_lesson_block_string)
"junjie.j.qian@pwc.com"
] | junjie.j.qian@pwc.com |
f56ba54a66e2545698c9990565b4122d99a5cbe0 | 211b9135e09cf1ae290ea42349f495ab5ad62852 | /ligpargen/inout/pqr.py | f8724c2ebdb7a4dc4f61c287b35741b70cee4218 | [
"MIT"
] | permissive | quantaosun/ligpargen | e4691dea466918d8d237410bdef42a4b1c8785e7 | edc5445a57f858e800baa818cd5f6b3cdaa76ea9 | refs/heads/main | 2023-03-21T21:48:54.892052 | 2021-03-10T10:28:23 | 2021-03-10T10:28:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | """
Module with functions to generate PQR input (PQR)
Author: Israel Cabeza de Vaca Lopez
Email: israel.cabezadevaca@icm.uu.se
"""
import os
# kcal/mol -> kJ/mol conversion factor (not referenced in this module;
# presumably kept for parity with sibling writers — TODO confirm before removing).
kcalToKj = 4.184
def writePQR(molecule, pqrFile):
    """Write the molecule to ``pqrFile`` in PQR format.

    Atoms are emitted sorted by their original serial number, skipping the
    leading structural dummy atoms.  The last column is the van der Waals
    radius derived from the LJ sigma: (sigma/2) * 2**(1/6).

    Parameters
    ----------
    molecule : molecule class
        Molecule to write; must expose ``atoms``,
        ``numberOfStructuralDummyAtoms``, ``residueName`` and the
        ``shiftX``/``shiftY``/``shiftZ`` coordinate offsets.
    pqrFile : str
        Output PQR file name.
    """
    sigmaFactor = 2.0**(1.0/6.0)  # 2^(1/6): converts sigma to the Rmin distance

    with open(pqrFile, 'w') as ofile:
        # The slice already produces a fresh list, so the original's wrapping
        # list comprehension was redundant and has been dropped.
        atomsToWrite = sorted(molecule.atoms[molecule.numberOfStructuralDummyAtoms:],
                              key=lambda atom: atom.serialOriginal)

        for i, atom in enumerate(atomsToWrite, start=1):
            # Residue number is always written as 1 (single-residue ligand).
            ofile.write('ATOM %5d %4s %3s %4d %8.3f%8.3f%8.3f%8.4f%7.4f\n' % (i, atom.nameOriginal, molecule.residueName, 1, atom.x + molecule.shiftX,
                atom.y + molecule.shiftY, atom.z + molecule.shiftZ, atom.charge, (atom.sigma/2.0)*sigmaFactor))
def getFileNames(molname, workdir):
    """Build the output PQR file path for ``molname`` inside ``workdir``.

    Parameters
    ----------
    molname : str
        Molecule name (used as the file stem).
    workdir : str
        Working folder path.

    Returns
    -------
    str
        Full path of the PQR file.
    """
    fileName = molname + '.pqr'
    return os.path.join(workdir, fileName)
def write(molecule, molName, workdir):
    """Write ``molecule`` as ``<molName>.pqr`` inside ``workdir``."""
    outputPath = getFileNames(molName, workdir)
    writePQR(molecule, outputPath)
| [
"learsi9182@gmail.com"
] | learsi9182@gmail.com |
fff3e450967edd4d5d28de96357ed12b9db6ef16 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/containerapp/azext_containerapp/tests/latest/test_containerapp_preview_scenario.py | c5bec1652214c86c622338ef87c1eb662d0ab5a8 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 9,214 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import time
from time import sleep
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, JMESPathCheck, live_only)
from subprocess import run
from .common import (write_test_file, TEST_LOCATION, clean_up_test_file)
from .utils import create_containerapp_env
# Absolute path of this test module's directory (not referenced below;
# presumably kept for parity with sibling test modules — TODO confirm).
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
class ContainerappPreviewScenarioTest(ScenarioTest):
    """Live scenario tests for `az containerapp` preview environment types.

    Exercises both the Arc-connected ("connected") and the regular
    ("managed") Container Apps environments end to end against real Azure
    resources.
    """

    def __init__(self, method_name, config_file=None, recording_name=None, recording_processors=None,
                 replay_processors=None, recording_patches=None, replay_patches=None, random_config_dir=False):
        super().__init__(method_name, config_file, recording_name, recording_processors, replay_processors,
                         recording_patches, replay_patches, random_config_dir)
        # The connected-environment flow needs these CLI extensions installed.
        cmd = ['azdev', 'extension', 'add', 'connectedk8s']
        run(cmd, check=True)
        cmd = ['azdev', 'extension', 'add', 'k8s-extension']
        run(cmd, check=True)
        # Wait for extensions to be installed
        # We mock time.sleep in azure-sdk-tools, that's why we need to use sleep here.
        sleep(120)

    @ResourceGroupPreparer(location="eastus", random_name_length=15)
    def test_containerapp_preview_environment_type(self, resource_group):
        """Create apps in an Arc-connected environment and verify list/show/delete."""
        self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
        aks_name = "my-aks-cluster"
        connected_cluster_name = "my-connected-cluster"
        custom_location_id = None
        # Best-effort infrastructure setup: AKS cluster -> Arc connect ->
        # containerapp extension -> custom location.  Failures are swallowed
        # (presumably so recorded re-runs over existing infra still pass —
        # TODO confirm; a genuine setup failure surfaces later instead).
        try:
            self.cmd(f'aks create --resource-group {resource_group} --name {aks_name} --enable-aad --generate-ssh-keys --enable-cluster-autoscaler --min-count 4 --max-count 10 --node-count 4')
            self.cmd(f'aks get-credentials --resource-group {resource_group} --name {aks_name} --overwrite-existing --admin')
            self.cmd(f'connectedk8s connect --resource-group {resource_group} --name {connected_cluster_name}')
            connected_cluster = self.cmd(f'az connectedk8s show --resource-group {resource_group} --name {connected_cluster_name}').get_output_in_json()
            connected_cluster_id = connected_cluster.get('id')
            extension = self.cmd(f'az k8s-extension create'
                                 f' --resource-group {resource_group}'
                                 f' --name containerapp-ext'
                                 f' --cluster-type connectedClusters'
                                 f' --cluster-name {connected_cluster_name}'
                                 f' --extension-type "Microsoft.App.Environment" '
                                 f' --release-train stable'
                                 f' --auto-upgrade-minor-version true'
                                 f' --scope cluster'
                                 f' --release-namespace appplat-ns'
                                 f' --configuration-settings "Microsoft.CustomLocation.ServiceAccount=default"'
                                 f' --configuration-settings "appsNamespace=appplat-ns"'
                                 f' --configuration-settings "clusterName={connected_cluster_name}"'
                                 f' --configuration-settings "envoy.annotations.service.beta.kubernetes.io/azure-load-balancer-resource-group={resource_group}"').get_output_in_json()
            custom_location_name = "my-custom-location"
            custom_location_id = self.cmd(f'az customlocation create -g {resource_group} -n {custom_location_name} -l {TEST_LOCATION} --host-resource-id {connected_cluster_id} --namespace appplat-ns -c {extension["id"]}').get_output_in_json()['id']
        except Exception as e:
            pass
        # create connected environment with client or create a command for connected?
        sub_id = self.cmd('az account show').get_output_in_json()['id']
        connected_env_name = 'my-connected-env'
        connected_env_resource_id = f"/subscriptions/{sub_id}/resourceGroups/{resource_group}/providers/Microsoft.App/connectedEnvironments/{connected_env_name}"
        # The connected environment is created via a raw ARM PUT (no dedicated
        # CLI command at this API version); payload is written to a temp file.
        file = f"{resource_group}.json"
        env_payload = '{{ "location": "{location}", "extendedLocation": {{ "name": "{custom_location_id}", "type": "CustomLocation" }}, "Properties": {{}}}}' \
            .format(location=TEST_LOCATION, custom_location_id=custom_location_id)
        write_test_file(file, env_payload)
        self.cmd(f'az rest --method put --uri "{connected_env_resource_id}?api-version=2022-06-01-preview" --body "@{file}"')

        # Poll until the environment reaches the Succeeded provisioning state.
        containerapp_env = self.cmd(f'az rest --method get --uri "{connected_env_resource_id}?api-version=2022-06-01-preview"').get_output_in_json()
        while containerapp_env["properties"]["provisioningState"].lower() != "succeeded":
            time.sleep(5)
            containerapp_env = self.cmd(
                f'az rest --method get --uri "{connected_env_resource_id}?api-version=2022-06-01-preview"').get_output_in_json()
        # Create one app referencing the environment by name ...
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        self.cmd(
            f'az containerapp create --name {ca_name} --resource-group {resource_group} --environment {connected_env_name} --image "mcr.microsoft.com/k8se/quickstart:latest" --environment-type connected',
            checks=[
                JMESPathCheck('properties.environmentId', connected_env_resource_id),
                JMESPathCheck('properties.provisioningState', "Succeeded")
            ])
        # ... and one referencing it by full resource id.
        ca_name2 = self.create_random_name(prefix='containerapp', length=24)
        self.cmd(
            f'az containerapp create --name {ca_name2} --resource-group {resource_group} --environment {connected_env_resource_id} --image "mcr.microsoft.com/k8se/quickstart:latest" --environment-type connected',
            checks=[
                JMESPathCheck('properties.environmentId', connected_env_resource_id),
                JMESPathCheck('properties.provisioningState', "Succeeded")
            ])
        # test show/list/delete
        self.cmd('containerapp list -g {}'.format(resource_group), checks=[
            JMESPathCheck('length(@)', 2)
        ])
        self.cmd('containerapp list -g {} --environment-type {}'.format(resource_group, 'connected'), checks=[
            JMESPathCheck('length(@)', 2)
        ])
        self.cmd('containerapp list -g {} --environment-type {} --environment {}'.format(resource_group, 'connected', connected_env_name), checks=[
            JMESPathCheck('length(@)', 2)
        ])
        self.cmd('containerapp list -g {} --environment-type {}'.format(resource_group, 'managed'), checks=[
            JMESPathCheck('length(@)', 0)
        ])
        app2 = self.cmd('containerapp show -n {} -g {}'.format(ca_name2, resource_group)).get_output_in_json()
        self.cmd('containerapp delete --ids {} --yes'.format(app2['id']))
        self.cmd('containerapp delete -n {} -g {} --yes'.format(ca_name, resource_group))
        self.cmd('containerapp list -g {}'.format(resource_group), checks=[
            JMESPathCheck('length(@)', 0)
        ])
        clean_up_test_file(file)

    @ResourceGroupPreparer(location="eastus")
    def test_containerapp_preview_e2e(self, resource_group):
        """Create an app in a managed environment and verify show/list/delete."""
        self.cmd('configure --defaults location={}'.format(TEST_LOCATION))
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        create_containerapp_env(self, env_name, resource_group)
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
        self.cmd(
            f'az containerapp create --name {ca_name} --resource-group {resource_group} --environment {env_name} --image "mcr.microsoft.com/k8se/quickstart:latest" --environment-type managed',
            checks=[
                JMESPathCheck('properties.environmentId', containerapp_env['id']),
                JMESPathCheck('properties.provisioningState', "Succeeded")
            ])
        app = self.cmd(
            'containerapp show -n {} -g {}'.format(ca_name, resource_group),
            checks=[
                JMESPathCheck('properties.environmentId', containerapp_env['id']),
                JMESPathCheck('properties.provisioningState', "Succeeded"),
                JMESPathCheck('name', ca_name),
            ]
        ).get_output_in_json()
        self.cmd('containerapp list -g {}'.format(resource_group), checks=[
            JMESPathCheck('length(@)', 1)
        ])
        self.cmd('containerapp list -g {} --environment-type {}'.format(resource_group, 'managed'), checks=[
            JMESPathCheck('length(@)', 1)
        ])
        self.cmd('containerapp delete --ids {} --yes'.format(app['id']))
        self.cmd('containerapp list -g {}'.format(resource_group), checks=[
            JMESPathCheck('length(@)', 0)
        ])
| [
"noreply@github.com"
] | noreply@github.com |
1c70397d0cba4a372a847462b857a32c0fd54df2 | 13ff676f375a737339b0822906b10b5f5d5250d6 | /Python/gauss_adelante.py | b44fb6a97d2e9634e01f562e7ce36e3a37b29def | [] | no_license | m-herrera/ANPI-Catalog | 97e8dae9ee592cd27f0da4fe6da5eab01791a903 | 5397243bb2013854437de6ae22eafaad7bb49f12 | refs/heads/master | 2022-03-13T19:22:13.039730 | 2019-11-04T03:02:13 | 2019-11-04T03:02:13 | 212,427,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | import numpy as np
"""
Metodo iterativo de gauss hacia adelante para solucion de sistemas de
ecuaciones.
Entradas: Matriz de Coeficientes, matriz de terminos independientes,
valor inicial y tolerancia.
Salidas: aproximacion y cantidad de iteraciones.
"""
def gauss_adelante(A, b, x0, tol):
    """Iteratively solve ``A x = b`` via the splitting A = L + D + U.

    Uses M = D + U as the sweep matrix and N = -L, i.e. the fixed-point
    iteration x_{k+1} = M^{-1} (N x_k + b), stopping when the residual norm
    ||A x - b|| drops below ``tol``.  Convergence requires the spectral
    radius of M^{-1} N to be < 1 (e.g. diagonally dominant systems).

    Parameters
    ----------
    A : array_like, shape (n, n)
        Coefficient matrix; D + U must be invertible.
    b : array_like, shape (n, 1)
        Right-hand side (column vector).
    x0 : array_like, shape (n, 1)
        Initial guess.
    tol : float
        Residual-norm tolerance.

    Returns
    -------
    (x, i) : (numpy.ndarray, int)
        Approximate solution and the number of iterations performed.
    """
    A = np.array(A, dtype=float)
    b = np.array(b, dtype=float)

    # Strictly-lower / diagonal / strictly-upper splitting, vectorized
    # (replaces the original O(n^2) Python double loop).
    L = np.tril(A, -1)
    D = np.diag(np.diag(A))
    U = np.triu(A, 1)

    x = np.array(x0, dtype=float)
    error = np.linalg.norm(np.dot(A, x) - b)

    i = 0
    # Precompute the iteration operator and constant term outside the loop.
    m_inv = np.linalg.inv(D + U)
    m_times_n = np.dot(m_inv, -1 * L)
    m_times_b = np.dot(m_inv, b)

    while error >= tol:
        x = np.dot(m_times_n, x) + m_times_b
        error = np.linalg.norm(np.dot(A, x) - b)
        i += 1

    return x, i
# Demo run on a diagonally dominant 3x3 system; prints (solution, iterations).
print(gauss_adelante([[3,1,1],[1,3,1],[1,1,3]], [[5],[5],[5]], [[-1],[0],[-1]], 0.001))
"m.herrera0799@gmail.com"
] | m.herrera0799@gmail.com |
676a8d4121ad27fd5bfa82844f08c833b388178c | ffab02cf7e1213f91923cb1343cef4616a7de5a7 | /venv/bin/isort | 6d23f4819e78f95b81f0dc605acf081309c42fe5 | [] | no_license | mornville/flask_blog | 4e50d6c3f835274589b278ce14f2f445b691b087 | bf66060f3f519170e3d4865e6d85b6543359e9b0 | refs/heads/master | 2021-12-28T08:27:04.556959 | 2019-10-01T14:57:09 | 2019-10-01T14:57:09 | 203,522,537 | 0 | 0 | null | 2021-12-13T20:16:58 | 2019-08-21T06:37:56 | Python | UTF-8 | Python | false | false | 251 | #!/Users/ashutoshjha/Desktop/flask_blog/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"aj97389@gmail.com"
] | aj97389@gmail.com | |
5a2ebd58eb237a8079aa77a1e5023bf20e50f182 | 4659f206098fdcaa72b059f1c5e4afe4c5fad3d5 | /planemo-de/xenv/lib/python3.7/site-packages/galaxy/__init__.py | 7653e9a409809c7918eef0427eacfbc2d38e427d | [] | no_license | Slugger70/galaxy-metabolomics | e1ef083316394ace66c1f69c313db0a0fc8c3dec | 0cbee8fe9e7cf1cc37832751ffdd9f88ff363136 | refs/heads/master | 2020-09-19T21:51:16.177730 | 2019-11-26T23:54:43 | 2019-11-26T23:54:43 | 224,306,539 | 0 | 0 | null | 2019-11-26T23:45:53 | 2019-11-26T23:45:52 | null | UTF-8 | Python | false | false | 370 | py | # -*- coding: utf-8 -*-
__version__ = '19.5.2'
PROJECT_NAME = "galaxy-lib"
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy-lib"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_EMAIL = 'jmchilton@gmail.com'
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
PROJECT_USERAME, PROJECT_NAME
)
| [
"ma.bioinformatics@gmail.com"
] | ma.bioinformatics@gmail.com |
c553e7fd3437019d4e7b5b575e93d1ba5422ad57 | 527c39fa246ec9ba271589c63af2f0b2bc292d6e | /carwash_Project/asgi.py | 69d82c4b733b781314da018ccd064f678387f8cb | [] | no_license | shotttik/carwashProject | 422295d3da8870e1f31a69e50dfad3b1e310d7fd | 6d19a2a9f298c69cd357142a76e3f845ca5fa474 | refs/heads/master | 2023-03-04T20:47:19.979776 | 2021-02-18T13:36:14 | 2021-02-18T13:36:14 | 332,287,170 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
ASGI config for carwash_Project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'carwash_Project.settings')
application = get_asgi_application()
| [
"shota.akhlouri@gmail.com"
] | shota.akhlouri@gmail.com |
8e3355f79679a4b37fc3d64860a4ce31c5548fa8 | de8cfb5a1d39b40543e8e9d3f960f4b675781a08 | /dask/dataframe/shuffle.py | 4e85c7ce2194acd2302fb1ddeb476a43d0f86fd6 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | JDWarner/dask | 8b5c676d9078ecc498deb8fd47a54e1676c00a5f | 3dec8e3526520459668ced05f8e144fd7605d5ec | refs/heads/master | 2021-01-18T21:02:18.344193 | 2015-07-15T20:00:51 | 2015-07-15T20:00:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,141 | py | from itertools import count
from collections import Iterator
from math import ceil
from toolz import merge, accumulate, merge_sorted
import toolz
from operator import getitem, setitem
import pandas as pd
import numpy as np
from .. import threaded
from ..optimize import cull
from .core import DataFrame, Series, get, _Frame, tokens
from ..compatibility import unicode
from ..utils import ignoring
from .utils import (strip_categories, unique, shard_df_on_index, _categorize,
get_categories)
def set_index(df, index, npartitions=None, compute=True, **kwargs):
""" Set DataFrame index to new column
Sorts index and realigns Dataframe to new sorted order. This shuffles and
repartitions your data.
"""
npartitions = npartitions or df.npartitions
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
divisions = (index2
.quantiles(np.linspace(0, 100, npartitions+1))
.compute()).tolist()
return df.set_partition(index, divisions, compute=compute, **kwargs)
def new_categories(categories, index):
""" Flop around index for '.index' """
if index in categories:
categories = categories.copy()
categories['.index'] = categories.pop(index)
return categories
def set_partition(df, index, divisions, compute=False, **kwargs):
""" Group DataFrame by index
Sets a new index and partitions data along that index according to
divisions. Divisions are often found by computing approximate quantiles.
The function ``set_index`` will do both of these steps.
Parameters
----------
df: DataFrame/Series
Data that we want to re-partition
index: string or Series
Column to become the new index
divisions: list
Values to form new divisions between partitions
See Also
--------
set_index
shuffle
partd
"""
if isinstance(index, _Frame):
assert df.divisions == index.divisions
import partd
p = ('zpartd' + next(tokens),)
# Get Categories
token = next(tokens)
catname = 'set-partition--get-categories-old' + token
catname_new = 'set-partition--get-categories-new' + token
dsk1 = {catname: (get_categories, df._keys()[0]),
p: (partd.PandasBlocks, (partd.Buffer, (partd.Dict,), (partd.File,))),
catname_new: (new_categories, catname,
index.name if isinstance(index, Series) else index)}
# Partition data on disk
name = 'set-partition--partition' + next(tokens)
if isinstance(index, _Frame):
dsk2 = dict(((name, i),
(_set_partition, part, ind, divisions, p))
for i, (part, ind)
in enumerate(zip(df._keys(), index._keys())))
else:
dsk2 = dict(((name, i),
(_set_partition, part, index, divisions, p))
for i, part
in enumerate(df._keys()))
# Barrier
barrier_token = 'barrier' + next(tokens)
dsk3 = {barrier_token: (barrier, list(dsk2))}
if compute:
dsk = merge(df.dask, dsk1, dsk2, dsk3)
if isinstance(index, _Frame):
dsk.update(index.dask)
p, barrier_token = get(dsk, [p, barrier_token], **kwargs)
# Collect groups
name = 'set-partition--collect' + next(tokens)
dsk4 = dict(((name, i),
(_categorize, catname_new, (_set_collect, i, p, barrier_token)))
for i in range(len(divisions) - 1))
dsk = merge(df.dask, dsk1, dsk2, dsk3, dsk4)
if isinstance(index, _Frame):
dsk.update(index.dask)
if compute:
dsk = cull(dsk, list(dsk4.keys()))
return DataFrame(dsk, name, df.columns, divisions)
def barrier(args):
list(args)
return 0
def _set_partition(df, index, divisions, p):
""" Shard partition and dump into partd """
df = df.set_index(index)
df = strip_categories(df)
divisions = list(divisions)
shards = shard_df_on_index(df, divisions[1:-1])
p.append(dict(enumerate(shards)))
def _set_collect(group, p, barrier_token):
""" Get new partition dataframe from partd """
try:
return p.get(group)
except ValueError:
return pd.DataFrame()
def shuffle(df, index, npartitions=None):
""" Group DataFrame by index
Hash grouping of elements. After this operation all elements that have
the same index will be in the same partition. Note that this requires
full dataset read, serialization and shuffle. This is expensive. If
possible you should avoid shuffles.
This does not preserve a meaningful index/partitioning scheme.
See Also
--------
set_index
set_partition
partd
"""
if isinstance(index, _Frame):
assert df.divisions == index.divisions
if npartitions is None:
npartitions = df.npartitions
import partd
p = ('zpartd' + next(tokens),)
dsk1 = {p: (partd.PandasBlocks, (partd.Buffer, (partd.Dict,),
(partd.File,)))}
# Partition data on disk
name = 'shuffle-partition' + next(tokens)
if isinstance(index, _Frame):
dsk2 = dict(((name, i),
(partition, part, ind, npartitions, p))
for i, (part, ind)
in enumerate(zip(df._keys(), index._keys())))
else:
dsk2 = dict(((name, i),
(partition, part, index, npartitions, p))
for i, part
in enumerate(df._keys()))
# Barrier
barrier_token = 'barrier' + next(tokens)
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'shuffle-collect' + next(tokens)
dsk4 = dict(((name, i),
(collect, i, p, barrier_token))
for i in range(npartitions))
divisions = [None] * (npartitions + 1)
dsk = merge(df.dask, dsk1, dsk2, dsk3, dsk4)
if isinstance(index, _Frame):
dsk.update(index.dask)
return DataFrame(dsk, name, df.columns, divisions)
def partition(df, index, npartitions, p):
""" Partition a dataframe along a grouper, store partitions to partd """
rng = pd.Series(np.arange(len(df)))
if isinstance(index, Iterator):
index = list(index)
if not isinstance(index, (pd.Index, pd.core.generic.NDFrame)):
index = df[index]
if isinstance(index, pd.Index):
groups = rng.groupby([abs(hash(x)) % npartitions for x in index])
if isinstance(index, pd.Series):
groups = rng.groupby(index.map(lambda x: abs(hash(x)) % npartitions).values)
elif isinstance(index, pd.DataFrame):
groups = rng.groupby(index.apply(
lambda row: abs(hash(tuple(row))) % npartitions,
axis=1).values)
d = dict((i, df.iloc[groups.groups[i]]) for i in range(npartitions)
if i in groups.groups)
p.append(d)
def collect(group, p, barrier_token):
""" Collect partitions from partd, yield dataframes """
return p.get(group)
| [
"mrocklin@gmail.com"
] | mrocklin@gmail.com |
e0aea1f878599eef2c1896784066fbcd9cbeccef | 05355e0e90938d80dac6021e435dadb8ed19f1eb | /train.py | 255479d1ab41ff97e895e58649381977216137d5 | [] | no_license | mike1201/word-rnn-tensorflow | bfed2f1e29011433a6b200fab2499d7d1b6a6657 | ec9b87bf8ce0252495736ec66069409a3712f2d8 | refs/heads/master | 2021-09-01T08:22:01.828717 | 2017-12-26T01:21:29 | 2017-12-26T01:21:29 | 113,801,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,159 | py | from __future__ import print_function
import numpy as np
import tensorflow as tf
import argparse
import time
import os
from six.moves import cPickle
from utils import TextLoader
from model import Model
# Deine Parser Variable
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/tinyshakespeare',
help='data directory containing input.txt')
parser.add_argument('--input_encoding', type=str, default=None,
help='character encoding of input.txt, from https://docs.python.org/3/library/codecs.html#standard-encodings')
parser.add_argument('--log_dir', type=str, default='logs',
help='directory containing tensorboard logs')
parser.add_argument('--save_dir', type=str, default='save',
help='directory to store checkpointed models')
parser.add_argument('--rnn_size', type=int, default=256,
help='size of RNN hidden state')
parser.add_argument('--num_layers', type=int, default=2,
help='number of layers in the RNN')
parser.add_argument('--model', type=str, default='lstm',
help='rnn, gru, or lstm')
parser.add_argument('--batch_size', type=int, default=50,
help='minibatch size')
parser.add_argument('--seq_length', type=int, default=25,
help='RNN sequence length')
parser.add_argument('--num_epochs', type=int, default=50,
help='number of epochs')
parser.add_argument('--save_every', type=int, default=1000,
help='save frequency')
parser.add_argument('--grad_clip', type=float, default=5.,
help='clip gradients at this value')
parser.add_argument('--learning_rate', type=float, default=0.002,
help='learning rate')
parser.add_argument('--decay_rate', type=float, default=0.97,
help='decay rate for rmsprop')
parser.add_argument('--gpu_mem', type=float, default=0.666,
help='%% of gpu memory to be allocated to this process. Default is 66.6%%')
parser.add_argument('--init_from', type=str, default=None,
help="""continue training from saved model at this path. Path must contain files saved by previous training process:
'config.pkl' : configuration;
'words_vocab.pkl' : vocabulary definitions;
'checkpoint' : paths to model file(s) (created by tf).
Note: this file contains absolute paths, be careful when moving files around;
'model.ckpt-*' : file(s) with model definition (created by tf)
""")
args = parser.parse_args()
train(args)
def train(args):
# 3-1
data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length, args.input_encoding) # from utils.py
args.vocab_size = data_loader.vocab_size # arg.vocab_size = Dictionary Size
# check compatibility if training is continued from previously saved model
if args.init_from is not None:
# check if all necessary files exist
assert os.path.isdir(args.init_from), " %s must be a path" % args.init_from
assert os.path.isfile(
os.path.join(args.init_from, "config.pkl")), "config.pkl file does not exist in path %s" % args.init_from
assert os.path.isfile(os.path.join(args.init_from,
"words_vocab.pkl")), "words_vocab.pkl.pkl file does not exist in path %s" % args.init_from
ckpt = tf.train.get_checkpoint_state(args.init_from)
assert ckpt, "No checkpoint found"
assert ckpt.model_checkpoint_path, "No model path found in checkpoint"
# open old config and check if models are compatible
with open(os.path.join(args.init_from, 'config.pkl'), 'rb') as f:
saved_model_args = cPickle.load(f)
need_be_same = ["model", "rnn_size", "num_layers", "seq_length"]
for checkme in need_be_same:
assert vars(saved_model_args)[checkme] == vars(args)[
checkme], "Command line argument and saved model disagree on '%s' " % checkme
# open saved vocab/dict and check if vocabs/dicts are compatible
with open(os.path.join(args.init_from, 'words_vocab.pkl'), 'rb') as f:
saved_words, saved_vocab = cPickle.load(f)
assert saved_words == data_loader.words, "Data and loaded model disagree on word set!"
assert saved_vocab == data_loader.vocab, "Data and loaded model disagree on dictionary mappings!"
# 4-1
with open(os.path.join(args.save_dir, 'config.pkl'), 'wb') as f:
cPickle.dump(args, f)
with open(os.path.join(args.save_dir, 'words_vocab.pkl'), 'wb') as f:
cPickle.dump((data_loader.words, data_loader.vocab), f)
# 3-2
model = Model(args)
# 5-1
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(args.log_dir)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_mem)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
# 5-2
train_writer.add_graph(sess.graph)
# 3-3
tf.global_variables_initializer().run()
# 5-3
saver = tf.train.Saver(tf.global_variables())
# restore model
if args.init_from is not None: # To continue the last traiining process
saver.restore(sess, ckpt.model_checkpoint_path)
# 3-4
for e in range(model.epoch_pointer.eval(), args.num_epochs):
sess.run(tf.assign(model.lr,
args.learning_rate * (args.decay_rate ** e))) # Set decay rate, model.lr = learning rate
data_loader.reset_batch_pointer() # batch = 0
state = sess.run(model.initial_state) # return zero-filled cell state ( =Co )
speed = 0
if args.init_from is None:
assign_op = model.epoch_pointer.assign(e)
sess.run(assign_op)
if args.init_from is not None:
data_loader.pointer = model.batch_pointer.eval()
args.init_from = None
# 3-5
for b in range(data_loader.pointer, data_loader.num_batches):
start = time.time()
x, y = data_loader.next_batch() # Batch Data
feed = {model.input_data: x, model.targets: y, model.initial_state: state,
model.batch_time: speed}
summary, train_loss, state, _, _ = sess.run([merged, model.cost, model.final_state,
model.train_op, model.inc_batch_pointer_op], feed)
# 5-4
train_writer.add_summary(summary, e * data_loader.num_batches + b)
speed = time.time() - start
if (e * data_loader.num_batches + b) % args.batch_size == 0:
print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}" \
.format(e * data_loader.num_batches + b,
args.num_epochs * data_loader.num_batches,
e, train_loss, speed))
# 5-5
if (e * data_loader.num_batches + b) % args.save_every == 0 \
or (e == args.num_epochs - 1 and b == data_loader.num_batches - 1): # save for the last result
checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=e * data_loader.num_batches + b)
print("model saved to {}".format(checkpoint_path))
train_writer.close()
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
9cc11c031d402aa471ad7494c5f177903ce3c503 | aede0629b0e6d013b9c961055ac0fc46acafa3b2 | /course2/assignment4.py | 9a891dab090f9a3ee168a062bbad0b788c366967 | [] | no_license | faisalsahak/python_for_everybody_specialization_coursera | aecd0078a94f924623e5bb00801066f9dbe6c26a | 42e4710a1a7324e69e7082e28e37285be2ee4294 | refs/heads/main | 2023-02-12T09:08:32.020955 | 2020-12-28T23:12:52 | 2020-12-28T23:12:52 | 325,133,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | # Open the file romeo.txt and read it line by line. For each line, split the
# line into a list of words using the split() method. The program should build
# a list of words. For each word on each line check to see if the word is already
# in the list and if not append it to the list. When the program completes, sort
# and print the resulting words in alphabetical order.
# You can download the sample data at http://www.py4e.com/code3/romeo.txt
def checkWord(word, lst):
for i in range(len(lst)):
if lst[i] == word:
return True
return False
fname = input("Enter file name: ")
fh = open(fname)
lst = list()
for line in fh:
#split the line
words = line.split()
for word in words:
#check to see if the word is in the list
if checkWord(word, lst) :
continue
else:
#if it is ignore otherwise add it to the list
lst.append(word)
#sort the list and print
lst.sort()
print(lst)
# print(line.rstrip()) | [
"faisal@Faisals-MacBook-Pro.local"
] | faisal@Faisals-MacBook-Pro.local |
b4c53b5a157de0b8ab97f5807d5db2eb6ef40504 | 5bcaa5ddaa54c6d48dc260fc9d65499b8ba8c0bd | /FunctionalTest/tests.py | 02d3779903360d1909ed69638b9bbf6be55900fd | [] | no_license | loonwebdev2/bgrysap | a848eb226daa5e9579279c298a8f1b5cf38f01c8 | 4e719b8f8b13ff44037083a8f50942bd27320332 | refs/heads/main | 2023-06-07T18:38:56.528773 | 2021-06-27T09:06:12 | 2021-06-27T09:06:12 | 373,497,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,218 | py | from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.common.exceptions import WebDriverException
MAX_WAIT = 10
class BSMSTest(LiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def wait_for_table(self, row_text):
start_time = time.time()
while True:
try:
table = self.browser.find_element_by_id('id_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
return
except (AssertionError, WebDriverException) as e:
if time.time() - start_time > MAX_WAIT:
raise e
time.sleep(0.5)
def test_for_first_entry(self):
self.browser.get('http://localhost:8000')
#self.browser.get(self.live_server_url)
self.assertIn('BARANGAY SAP MONITORING SYSTEM', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('BARANGAY SAP MONITORING SYSTEM', header_text)
inputmncplty = self.browser.find_element_by_id('Municipality')
inputbrgy = self.browser.find_element_by_id('Brgy')
inputbrgyID = self.browser.find_element_by_id('BrgyID')
self.assertEqual(inputmncplty.get_attribute('placeholder'),'Enter Municipality')
self.assertEqual(inputbrgy.get_attribute('placeholder'),'Enter Barangay')
self.assertEqual(inputbrgyID.get_attribute('placeholder'),'Enter Barangay ID')
time.sleep(1)
inputmncpltyy = self.browser.find_element_by_id('Municipality')
inputmncplty.click()
inputmncplty.send_keys('Dasmariñas City')
time.sleep(1)
inputbrgy = self.browser.find_element_by_id('Brgy')
inputbrgy.click()
inputbrgy.send_keys('Brgy. Victoria Reyes')
time.sleep(1)
inputbrgyId = self.browser.find_element_by_id('BrgyID')
inputbrgyID.click()
inputbrgyID.send_keys('301185')
bAdd = self.browser.find_element_by_id('bAdd')
bAdd.click()
time.sleep(1)
inputaddFM= self.browser.find_element_by_id('addFM')
inputaddRS = self.browser.find_element_by_id('addRS')
inputaddadd = self.browser.find_element_by_id('addadd')
self.assertEqual(inputaddFM.get_attribute('placeholder'),'Enter Family Member')
self.assertEqual(inputaddRS.get_attribute('placeholder'),'Enter Relation')
self.assertEqual(inputaddadd.get_attribute('placeholder'),'Enter Address')
time.sleep(1)
inputaddFM = self.browser.find_element_by_id('addFM')
inputaddFM.click()
inputaddFM.send_keys('Sasuke Uchiha')
time.sleep(1)
inputaddRS = self.browser.find_element_by_id('addRS')
inputaddRS.click()
inputaddRS.send_keys('Father')
time.sleep(1)
inputaddadd = self.browser.find_element_by_id('addadd')
inputaddadd.click()
inputaddadd.send_keys('B 7 L 7')
bAdd = self.browser.find_element_by_id('bAdd')
bAdd.click()
time.sleep(1)
inputaddFM = self.browser.find_element_by_id('addFM')
inputaddFM.click()
inputaddFM.send_keys('Sakura Haruno')
time.sleep(1)
inputaddRS = self.browser.find_element_by_id('addRS')
inputaddRS.click()
inputaddRS.send_keys('Mother')
time.sleep(1)
inputaddadd = self.browser.find_element_by_id('addadd')
inputaddadd.click()
inputaddadd.send_keys('B 7 L 7')
bAdd = self.browser.find_element_by_id('bAdd')
bAdd.click()
time.sleep(1)
'''
def test_for_second_entry(self):
self.browser.get('http://localhost:8000')
#self.browser.get(self.live_server_url)
self.assertIn('BARANGAY SAP MONITORING SYSTEM', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('BARANGAY SAP MONITORING SYSTEM', header_text)
inputbrgy = self.browser.find_element_by_id('Brgy')
inputbrgyID = self.browser.find_element_by_id('BrgyID')
self.assertEqual(inputbrgy.get_attribute('placeholder'),'Enter Barangay')
self.assertEqual(inputbrgyID.get_attribute('placeholder'),'Enter Barangay ID')
time.sleep(1)
inputbrgy = self.browser.find_element_by_id('Brgy')
inputbrgy.click()
inputbrgy.send_keys('Brgy. Sampaloc')
time.sleep(1)
inputbrgyId = self.browser.find_element_by_id('BrgyID')
inputbrgyID.click()
inputbrgyID.send_keys('301186')
bAdd = self.browser.find_element_by_id('bAdd')
bAdd.click()
time.sleep(1)
inputaddFM= self.browser.find_element_by_id('addFM')
inputaddRS = self.browser.find_element_by_id('addRS')
inputaddadd = self.browser.find_element_by_id('addadd')
self.assertEqual(inputaddFM.get_attribute('placeholder'),'Enter Family Member')
self.assertEqual(inputaddRS.get_attribute('placeholder'),'Enter Relation')
self.assertEqual(inputaddadd.get_attribute('placeholder'),'Enter Address')
time.sleep(1)
inputaddFM = self.browser.find_element_by_id('addFM')
inputaddFM.click()
inputaddFM.send_keys('Kirigaya Kazuto')
time.sleep(1)
inputaddRS = self.browser.find_element_by_id('addRS')
inputaddRS.click()
inputaddRS.send_keys('Father')
time.sleep(1)
inputaddadd = self.browser.find_element_by_id('addadd')
inputaddadd.click()
inputaddadd.send_keys('B 11 L 11')
bAdd = self.browser.find_element_by_id('bAdd')
bAdd.click()
time.sleep(1)
inputaddFM= self.browser.find_element_by_id('addFM')
inputaddRS = self.browser.find_element_by_id('addRS')
inputaddadd = self.browser.find_element_by_id('addadd')
self.assertEqual(inputaddFM.get_attribute('placeholder'),'Enter Family Member')
self.assertEqual(inputaddRS.get_attribute('placeholder'),'Enter Relation')
self.assertEqual(inputaddadd.get_attribute('placeholder'),'Enter Address')
time.sleep(1)
inputaddFM = self.browser.find_element_by_id('addFM')
inputaddFM.click()
inputaddFM.send_keys('Yuuki Asuna')
time.sleep(1)
inputaddRS = self.browser.find_element_by_id('addRS')
inputaddRS.click()
inputaddRS.send_keys('Mother')
time.sleep(1)
inputaddadd = self.browser.find_element_by_id('addadd')
inputaddadd.click()
inputaddadd.send_keys('B 11 L 11')
bAdd = self.browser.find_element_by_id('bAdd')
bAdd.click()
#if __name__=='__main__':
# unittest.main()
user2_url = self.browser.current_url
self.assertRegex(user2_url, '/MNList/.+')
self.assertnotEqual(viewlist_url, user2_url)
inputbox = self.browser.find_element_by_id('new_id')
self.assertEqual(inputbox.get_attribute('placeholder'),'add notes')
inputbox.send_keys('Raymond')
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Raymond')
inputbox = self.browser.find_element_by_id('new_id')
inputbox.send_keys('Loon')
inputbox.send_keys(Keys.ENTER)
time.sleep(1)
self.wait_for_row_in_list_table('2: Loon')
self.wait_for_row_in_list_table('1: Raymond')
inputbox2 = self.browser.find_element_by_id('new_add')
self.assertEqual(inputbox2.get_attribute('placeholder'),'add address')
inputbox2.send_keys('Victo')
inputbox2.send_keys(Keys.ENTER)
self.browser.get(self.live_server_url)
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Rayz', page_text)
self.assertNotIn('Raizen', page_text)
inputbox = self.browser.find_element_by_id('new_id')
inputbox.send_keys('RaN')
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: RaN')
ray_list_url = self.browser.current_url
self.assertRegex(ray_list_url, '/lists/.+')
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Rayz', page_text)
self.assertIn('RaN', page_text)
def test_list_and_retrieve_it_later(self):
self.browser.get(self.live_server_url)
self.assertIn('My Notes', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('My Notes', header_text)
inputbox = self.browser.find_element_by_id('new_id')
self.assertEqual(inputbox.get_attribute('placeholder'),'add notes')
inputbox.send_keys('Raymond')
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Raymond')
inputbox = self.browser.find_element_by_id('new_id')
inputbox.send_keys('Loon')
inputbox.send_keys(Keys.ENTER)
time.sleep(1)
self.wait_for_row_in_list_table('2: Loon')
self.wait_for_row_in_list_table('1: Raymond')
def wait_for_row_in_list_table(self, row_text):
table = self.browser.find_element_by_id('id_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('Raymond')
inputbox = self.browser.find_element_by_id('new_id')
inputbox.send_keys('Loon')
inputbox.send_keys(Keys.ENTER)
time.sleep(1)
self.wait_for_row_in_list_table('Raymond')
self._for_row_in_list_table('Loon')
def setUp(self):
self.browser = webdriver.Firefox()
def test_browser_title(self):
self.browser.get('http://localhost:8000')
self.assertIn('My Notes', self.browser.title)
def tearDown(self):
self.browser.quit()
def test_browser_title(self):
self.browser.get('http://localhost:8000')
self.assertIn('My Notes', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('My Notes', header_text)
self.assertEqual(
inputbox.get_attribute('placeholder'),
'add notes'
)
inputbox.send_keys('Raymond')
inputbox.send_keys(Keys.ENTER)
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn('1: Raymond', [row.text for row in rows])
'''
| [
"raymond.loon@gsfe.tupcavite.edu.ph"
] | raymond.loon@gsfe.tupcavite.edu.ph |
d8e80b2e43ef4f4214584e45ab62a4ea0ba00616 | 9a6bac2e1e2209b4add8304eeb33bee7c2af756a | /src/nextbox_daemon/jobs.py | 1cac0b851e43719c5fa7c9d32fa58992749d708a | [] | no_license | daringer/nextnext | 36bc0559e78c54806648479bc6afb81b342e48c6 | 05501abf801e5be6b33cc4ff7cc184aaf5092e42 | refs/heads/main | 2023-03-19T22:14:12.045738 | 2021-03-07T14:31:33 | 2021-03-07T14:31:33 | 341,177,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,755 | py | from datetime import datetime as dt
from time import sleep
from pathlib import Path
import psutil
from nextbox_daemon.consts import *
from nextbox_daemon.command_runner import CommandRunner
from nextbox_daemon.config import log
from nextbox_daemon.nextcloud import Nextcloud
class BaseJob:
name = None
interval = None
def __init__(self):
self.last_run = dt.now()
def is_due(self):
if self.interval is None:
return False
return (dt.now() - self.last_run).seconds > self.interval
def run(self, cfg):
log.debug(f"starting worker job: {self.name}")
self.last_run = dt.now()
self._run(cfg)
log.debug(f"finished worker job: {self.name}")
def _run(self, cfg):
raise NotImplementedError()
# class UpdateJob(BaseJob):
# name = "UpdateJob"
# interval = 11
# def __init__(self):
# self.snap_mgr = SnapsManager()
# super().__init__()
# def _run(self, cfg):
# while self.snap_mgr.any_change_running():
# sleep(1)
# log.debug("before check&refresh, waiting for change(s) to finish")
# log.debug("checking for needed refresh")
# updated = self.snap_mgr.check_and_refresh(["nextbox", "nextcloud-nextbox"])
# if len(updated) > 0:
# while self.snap_mgr.any_change_running():
# sleep(1)
# log.debug("refresh started, waiting for change(s) to finish")
# if "nextbox" in updated:
# CommandRunner([SYSTEMCTL_BIN, "restart", NEXTBOX_SERVICE], block=True)
# log.info("restarted nextbox-daemon due to update")
# cr1 = CommandRunner(UPDATE_NEXTBOX_APP_CMD, block=True)
# if cr1.returncode != 0:
# cr2 = CommandRunner(INSTALL_NEXTBOX_APP_CMD, block=True)
# log.info("installed nextbox nextcloud app - wasn't found for update")
class ProxySSHJob(BaseJob):
name = "ProxySSH"
interval = 291
ssh_cmd = "ssh -o StrictHostKeyChecking=accept-new -p {ssh_port} -f -N -i {key_path} -R localhost:{remote_port}:localhost:{local_port} {user}@{host}"
def __init__(self):
self.pid = None
self.nc = Nextcloud()
super().__init__()
def _run(self, cfg):
data = {
"ssh_port": 2215,
"key_path": PROXY_KEY_PATH,
"remote_port": cfg["config"]["proxy_port"],
"local_port": 80,
"host": "nextbox.link",
"user": "proxyuser"
}
# do nothing except killing process, if proxy_active == False
if not cfg["config"]["proxy_active"]:
if self.pid and psutil.pid_exists(self.pid):
psutil.Process(self.pid).kill()
self.pid = None
return
if not cfg["config"]["nk_token"]:
log.error("cannot establish reverse proxy - no token")
return
if self.pid is not None:
if not psutil.pid_exists(self.pid):
self.pid = None
log.warning("missing reverse proxy process, restarting")
# no running reverse proxy connection, establish!
if self.pid is None:
log.info("Starting reverse proxy connection")
cmd = self.ssh_cmd.format(**data).split(" ")
cr = CommandRunner(cmd, block=True)
if cr.returncode == 0:
# searching for process, as daemonizing leads to new pid
for proc in psutil.process_iter():
if proc.name() == "ssh":
self.pid = proc.pid
break
log.info(f"Success starting reverse proxy (pid: {self.pid})")
else:
cr.log_output()
log.error("Failed starting reverse proxy, check configuration")
class TrustedDomainsJob(BaseJob):
name = "TrustedDomains"
interval = 471
static_entries = ["192.168.*.*", "10.*.*.*", "172.16.*.*", "172.18.*.*", "nextbox.local"]
def __init__(self):
self.nc = Nextcloud()
super().__init__()
def _run(self, cfg):
trusted_domains = self.nc.get_config("trusted_domains")
default_entry = trusted_domains[0]
entries = [default_entry] + self.static_entries[:]
if cfg["config"]["domain"]:
entries.append(cfg["config"]["domain"])
if cfg["config"]["proxy_active"]:
entries.append(cfg["config"]["proxy_domain"])
if any(entry not in trusted_domains for entry in entries):
self.nc.set_config("trusted_domains", entries)
# # my_ip = local_ip()
# get_cmd = lambda prop: [OCC_BIN, "config:system:get", prop]
# set_cmd = lambda prop, idx, val: \
# [OCC_BIN, "config:system:set", prop, str(idx), "--value", val]
# cr = CommandRunner(get_cmd("trusted_domains"), block=True)
# trusted_domains = [line.strip() for line in cr.output if len(line.strip()) > 0]
# cr = CommandRunner(get_cmd("proxy_domains"), block=True)
# proxy_domains = [line.strip() for line in cr.output if len(line.strip()) > 0]
# # leave 0-th entry as it is all the time: worst-case fallback
# # check if any static entries are missing
# if any(entry not in trusted_domains for entry in self.static_entries):
# for idx, entry in enumerate(self.static_entries):
# log.info(f"adding '{entry}' to 'trusted_domains' with idx: {idx+1}")
# cr = CommandRunner(set_cmd("trusted_domains", idx+1, entry), block=True)
# if cr.returncode != 0:
# log.warning(f"failed: {cr.info()}")
# # check for dynamic domain, set to idx == len(static) + 1
# dyn_dom = cfg.get("config", {}).get("domain")
# idx = len(self.static_entries) + 1
# if dyn_dom is not None and dyn_dom not in trusted_domains:
# log.info(f"updating 'trusted_domains' with dynamic domain: '{dyn_dom}'")
# cr = CommandRunner(set_cmd(idx, dyn_dom),
# block=True)
# if cr.returncode != 0:
# log.warning(f"failed adding domain ({dyn_dom}) to trusted_domains")
# # check and set proxy domain, set to idx == 1
# proxy_dom = cfg.get("config", {}).get("proxy_domain")
# if proxy_dom and cfg.get("config", {}).get("proxy_active"):
# idx = 1
# if proxy_dom is not None and proxy_dom not in proxy_domains:
# log.info(
# f"updating 'proxy_domains' with proxy domain: '{proxy_dom}'")
# cr = CommandRunner(set_cmd(idx, proxy_dom), block=True)
# if cr.returncode != 0:
# log.warning(
# f"failed adding domain ({proxy_dom}) to proxy_domains")
class JobManager:
    """Registry of job instances, keyed by name, executed on demand."""

    def __init__(self, config):
        self.cfg = config
        self.jobs = {}

    def register_job(self, job):
        """Instantiate the given job class and store it under its declared name."""
        log.info(f"registering job {job.name}")
        if job.name in self.jobs:
            log.warning(f"overwriting job (during register) with name: {job.name}")
        self.jobs[job.name] = job()

    def handle_job(self, job_name):
        """Look up and run a registered job; failures are logged, never raised."""
        job = self.jobs.get(job_name)
        if job is None:
            log.error(f"could not find job with name: {job_name}")
            return
        try:
            job.run(self.cfg)
        except Exception as e:
            log.error(f"failed running job: {job_name}")
            log.error(msg="EXC", exc_info=e)

    def get_recurring_job(self):
        """Return the name of the first job reporting itself due, or None."""
        for job_name, job in self.jobs.items():
            if job.is_due():
                return job_name
| [
"coder@safemailbox.de"
] | coder@safemailbox.de |
6284288aa94622d03c3f24e10f3eb63df2e27dd0 | 22bcb68759d516eea70d18116cd434fcd0a9d842 | /scrap/infibeam_books_scrap1.py | 0fbafc3b59b8a0f06df21d43016818f71ac9c0f6 | [] | no_license | lovesh/abhiabhi-web-scrapper | 1f5da38c873fea74870d59f61c3c4f52b50f1886 | b66fcadc56377276f625530bdf8e739a01cbe16b | refs/heads/master | 2021-01-01T17:16:51.577914 | 2014-10-18T15:56:42 | 2014-10-18T15:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,183 | py | import downloader
import dom
import urllib2
import re
import time
import math
import pymongo
from collections import defaultdict
import datetime
siteurl='http://www.infibeam.com'
category_browser='http://www.infibeam.com/Books/BrowseCategories.action'
subcategory_browser='http://www.infibeam.com/Books/BrowseCategories.action'
books=[]
book_urls=defaultdict(list)
logfile=open('infibeam_books_log.txt','w')
dl=downloader.Downloader()
dl.addHeaders({'Origin':siteurl,'Referer':siteurl})
shipping_pattern = re.compile('in (\d+) business days', re.I)
def getCategoryUrls():
	"""Return {category name: absolute URL} scraped from the category browser page."""
	page = dom.DOM(url=category_browser)
	category_path = '//div[@id="allcategories"]//h3/a'
	urls = {}
	for link in page.getLinksWithXpath(category_path):
		# link is (text, href); hrefs on the page are site-relative
		urls[link[0]] = 'http://www.infibeam.com' + link[1]
	return urls
def getSubCategoryUrls():
	"""Return the set of absolute sub-category URLs from the category browser page."""
	page = dom.DOM(url=subcategory_browser)
	subcategory_path = '//div[@id="allcategories"]//ul/li/a'
	urls = set()
	for link in page.getLinksWithXpath(subcategory_path):
		urls.add('http://www.infibeam.com' + link[1])
	return urls
def getBookUrlsFromPage(html):
	"""Extract the set of book detail-page URLs from one search-result page."""
	page_dom = dom.DOM(string=html)
	anchors = page_dom.getLinksWithXpath(
		'//ul[@class="search_result"]//span[@class="title"]/h2/a')
	return set(anchor[1] for anchor in anchors)
def getBookUrlsOfCategory(cat,category_url):
page=urllib2.urlopen(category_url)
html=page.read()
page.close()
page=dom.DOM(string=html)
urls=getBookUrlsFromPage(html) #get book urls from first page
count_path='//div[@id="search_result"]/div/b[2]'
count=int(page.getNodesWithXpath(count_path)[0].text.replace(',',''))
print count
if count>20:
num_pages=int(math.ceil(count/20.0))
page_urls=set(category_url+'/search?page='+str(page) for page in xrange(2,num_pages))
print page_urls
dl.putUrls(page_urls)
result=dl.download()
for r in result:
status=result[r][0]
html=result[r][1]
if status > 199 and status < 400:
urls.update(getBookUrlsFromPage(html))
url_dict={}
for url in urls:
url_dict[url]=cat
return url_dict
def getAllBookUrls():
	"""Collect book URLs for every category into the module-level book_urls dict.

	book_urls maps url -> list of category names (a book can appear in
	several categories). Progress is echoed to stdout and to the log file.
	"""
	global book_urls
	category_urls=getCategoryUrls()
	start=time.time()
	for cat in category_urls:
		print('Getting book urls of category %s\n\n'%cat)
		urls=getBookUrlsOfCategory(cat,category_urls[cat])
		print('Witring book urls of category %s\n\n'%cat)
		logfile.write('Witring book urls of category %s\n\n'%cat)
		for url in urls:
			logfile.write(url+'\n')
			# urls[url] is the category name; append so multi-category books keep all tags
			book_urls[url].append(urls[url])
		logfile.write('\n\n\n\n')
	finish=time.time()
	print "All book urls(%s) fetched in %s\n\n",(len(book_urls),str(finish-start))
	logfile.write("All book urls fetched in %s\n\n"%str(finish-start))
	logfile.flush()
	return book_urls
def parseBookPage(url=None,string=None):
	"""Parse one product page (fetched from *url* or given as *string*).

	Returns a dict describing the book, or False when fetching *url* fails.
	The dict also carries a one-entry 'product_history' list used for
	price/availability tracking.
	"""
	book={}
	print url
	if url:
		try:
			doc=dom.DOM(url=url)
		except urllib2.HTTPError:
			return False
	else:
		doc=dom.DOM(string=string)
	# presence of the buy button marks the product as purchasable
	addBox=doc.getNodesWithXpath('//input[@class="buyimg "]')
	if url:
		book['url']=url
	if addBox: #availability check
		book['availability']=1 # availability 1 signals "in stock"
		m = shipping_pattern.search(doc.html)
		if m:
			# shipping time in business days, kept as a 1-tuple
			book['shipping']=(int(m.group(1)), )
	else:
		book['availability']=0
	price_path = '//span[@class="infiPrice amount price"]'
	price = doc.getNodesWithXpath(price_path)
	if len(price) > 0:
		book['price']=int(price[0].text.replace(',', ''))
	img_path="//img[@id='imgMain']"
	book['img_url']=doc.getImgUrlWithXpath(img_path)
	# product details table: try several xpath fallbacks for layout variants
	tbody_path='//div[@id="ib_products"]/table/tbody'
	if len(doc.getNodesWithXpath(tbody_path)) == 0:
		tbody_path='//div[@id="ib_products"]/table'
	if len(doc.getNodesWithXpath(tbody_path)) == 0:
		tbody_path='//table[@style="color:#333; font:verdana,Arial,sans-serif;"]'
	data=doc.parseTBody(tbody_path)
	if data:
		if 'author' in data:
			data['author']=data['author'].split(',')
		if 'publish date' in data:
			# assumes the site formats dates as YYYY-MM-DD -- TODO confirm
			m=re.search('(\d+)-(\d+)-(\d+)',data['publish date'])
			if m:
				data['pubdate']=datetime.date(int(m.group(1)),int(m.group(2)),int(m.group(3)))
		book.update(data)
	book['scraped_datetime']=datetime.datetime.now()
	book['last_modified_datetime']=datetime.datetime.now()
	book['site']='infibeam'
	product_history={}
	if 'price' in book:
		product_history['price']=book['price']
	if 'shipping' in book:
		product_history['shipping']=book['shipping']
	product_history['availability']=book['availability']
	product_history['datetime']=book['last_modified_datetime']
	book['product_history']=[product_history,]
	return book
def go():
	"""Download every collected book URL, parse each page, return the books list.

	Pages whose URL lacks the /Books/ segment are tagged as e-books.
	"""
	global books
	urls=getAllBookUrls()
	dl.putUrls(urls,10)
	# (removed dead "start=time" assignment that was immediately overwritten)
	start=time.time()
	result=dl.download()
	finish=time.time()
	# BUG FIX: the original logged len(books), which is always 0 here;
	# len(result) is the number of pages actually downloaded.
	logfile.write("All books(%s) downloaded in %s"%(len(result),str(finish-start)))
	start=time.time()
	for r in result:
		status=result[r][0]
		html=result[r][1]
		if status > 199 and status < 400:	# any non-error HTTP status
			book=parseBookPage(string=html)
			book['url']=r
			if r.find('/Books/') == -1:
				book['type']='ebook'
			else:
				book['type']='book'
			books.append(book)
	finish=time.time()
	logfile.write("All books parsed in %s"%str(finish-start))
	return books
def prepareXMLFeed():
	"""Scrape all books via go() and dump them as infibeam_books.xml."""
	books=go()
	root=dom.XMLNode('books')
	start=time.time()
	for book in books:
		# one <book> child per scraped record; createChildNodes expands the dict
		child=root.createChildNode('book')
		child.createChildNodes(book)
	f=open('infibeam_books.xml','w')
	f.write(root.nodeToString())
	f.close()
	finish=time.time()
	logfile.write("XML file created in %s"%str(finish-start))
| [
"lovesh.bond@gmail.com"
] | lovesh.bond@gmail.com |
1dffe5f62462692c17b0917a0d9f33174704c851 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/1215.py | 2727d86b5e032b798fe4ae7360d004c88b3cc807 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | def tidy(n):
a = list(str(n))
if len(a)>=2:
for i in range(len(a)-1):
if a[i]>a[i+1]:
a[i] = str(int(a[i])-1)
for j in range(i+1, len(a)):
a[j] = '9'
a = ''.join(a)
out = int(a)
return out
def check_tidy(n):
    """Repeatedly apply tidy() until the digits are in non-decreasing order."""
    candidate = tidy(n)
    sorted_form = int(''.join(sorted(str(candidate))))
    if candidate == sorted_form:
        return candidate
    return check_tidy(candidate)
# Read T test cases (one integer per line) from "i.in" and write the largest
# "tidy" number not exceeding each input to "o.out" in Code Jam output format.
in_f = open("i.in", 'r')
ou_f = open("o.out", 'w')
T = int(in_f.readline())
for i in range(T):
    s = in_f.readline().strip()
    k = int(s)
    out = check_tidy(k)
    # Code Jam style: "Case #<1-based index>: <answer>"
    j = "Case #" + str(i+1) +": " + str(out) + "\n"
    ou_f.write(j)
in_f.close()
ou_f.close()
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
b226a98fb2abde8bc174d9ed9e9c4d868c5bcc3a | ba6b396cda4c33e89400bc28a8d540d30ba51492 | /Laboratorio 2-Nicole Brito/subCocina.py | c689492865593988c0a40ca0899ca76002e030ba | [] | no_license | unimetadmin/laboratorio-2-mqtt-nicoolebs | f8195be3d6ccb39173df5163415da30cb0c5eeb6 | fcc61696d77249522889deaae21ea47e1999a8cf | refs/heads/main | 2023-05-01T06:37:09.148909 | 2021-05-10T20:53:15 | 2021-05-10T20:53:15 | 366,171,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,373 | py | # Laboratorio 2: IoT MQTT y Plotly
# Alumna: Nicole Brito Strusi
# Carnet: 20181110056
# SUSCRIPTOR DE LA COCINA DE LA CASA
#Importaciones
import sys
import paho.mqtt.client
#BASE DE DATOS
#Importaciones para la BD
import psycopg2
import conexionBD
import json
# Insert a fridge temperature reading into the public.nevera table.
def insertar_nevera(fecha, temperatura):
    try:
        # Open a database connection via the shared helper
        conexion = conexionBD.get_connection()
        cursor = conexion.cursor()
        # Parameterized query -- psycopg2 handles value escaping
        query = """INSERT INTO public.nevera(fecha_temperatura, temperatura) VALUES (%s,%s);"""
        cursor.execute(query,(fecha,temperatura))
        # Commit so the insert becomes visible in the database
        conexion.commit()
        #res = cursor.fetchone()
        conexionBD.close_connection(conexion)
    except (Exception, psycopg2.Error) as error:
        # NOTE(review): errors are only printed and the connection is not
        # closed on failure -- confirm conexionBD handles that cleanup.
        print('Error obteniendo la data',error)
# Insert an ice-maker reading into the public.hielo table.
def insertar_hielo(fecha, hielo):
    try:
        # Open a database connection via the shared helper
        conexion = conexionBD.get_connection()
        cursor = conexion.cursor()
        # Parameterized query -- psycopg2 handles value escaping
        query = """INSERT INTO public.hielo(fecha_hielo, hielo) VALUES (%s, %s);"""
        cursor.execute(query,(fecha,hielo))
        # Commit so the insert becomes visible in the database
        conexion.commit()
        #res = cursor.fetchone()
        conexionBD.close_connection(conexion)
    except (Exception, psycopg2.Error) as error:
        # NOTE(review): errors are only printed; connection not closed on failure.
        print('Error obteniendo la data',error)
# Insert a cooking-pot reading (temperature + status message) into public.olla.
def insertar_olla(fecha, olla, mensaje):
    try:
        # Open a database connection via the shared helper
        conexion = conexionBD.get_connection()
        cursor = conexion.cursor()
        # Parameterized query -- psycopg2 handles value escaping
        query = """INSERT INTO public.olla(fecha_olla, olla, mensaje_olla) VALUES (%s, %s, %s);"""
        cursor.execute(query,(fecha,olla,mensaje))
        # Commit so the insert becomes visible in the database
        conexion.commit()
        #res = cursor.fetchone()
        conexionBD.close_connection(conexion)
    except (Exception, psycopg2.Error) as error:
        # NOTE(review): errors are only printed; connection not closed on failure.
        print('Error obteniendo la data',error)
# Once connected to the broker, subscribe to every kitchen topic.
def on_connect(client, userdata, flags, rc):
    print('connected (%s)' % client._client_id)
    # '#' wildcard covers all subtopics of casa/cocina; QoS 2 = exactly-once
    client.subscribe(topic='casa/cocina/#', qos=2)
# Message handler: routes each incoming JSON payload to the matching DB insert.
def on_message(client, userdata, message):
    print('------------------------------')
    print('Tópico: %s' % message.topic)
    print('Mensaje: %s' % message.payload)
    print('QOS: %d' % message.qos)
    # Parse the JSON payload once (the original re-parsed it for every field),
    # and use .get so a payload without "Soy" is ignored instead of raising.
    payload = json.loads(message.payload)
    sensor = payload.get("Soy")
    if sensor == "Temperatura":
        # Fridge temperature reading
        insertar_nevera(payload["Fecha Temperatura"], payload["Temperatura"])
    elif sensor == "Hielo":
        # Ice-maker reading
        insertar_hielo(payload["Fecha Hielo"], payload["Hielo"])
    elif sensor == "Olla":
        # Cooking-pot temperature + status message
        insertar_olla(payload["Fecha Temperatura Olla"], payload["Temperatura Olla"], payload["Mensaje Olla"])
def main():
    # Persistent session (clean_session=False) so queued QoS>0 messages survive
    # reconnects under this fixed client id.
    client = paho.mqtt.client.Client(client_id='cocina-subs', clean_session=False)
    client.on_connect = on_connect
    client.on_message = on_message
    # Local broker on the default MQTT port
    client.connect(host='127.0.0.1', port=1883)
    # Block forever, dispatching network traffic and callbacks
    client.loop_forever()
if __name__ == '__main__':
    main()
    sys.exit(0)
"59893575+nicoolebs@users.noreply.github.com"
] | 59893575+nicoolebs@users.noreply.github.com |
d0efe615df556ed9568c2a46dc7edb0e76aaed81 | 88a21d7c30e308c048ba2b82fd65835d60811a6a | /Home.py | b1745874f94e487b684787feaf14fe71b1dd47a9 | [] | no_license | CameronSprowls/ObservableTextBasedAdventureGame | 35312bb253714ba044ad8bb784de68c0c68cfda7 | 3c8c8ca23404c19eb15dbd3ab2055a35364aecb5 | refs/heads/master | 2021-08-07T06:44:40.468100 | 2017-11-07T19:21:40 | 2017-11-07T19:21:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,911 | py | """
File to hold the class for a home.
Homes observe the monsters living within, will be notified when a monster in defeated
"""
from Monster import *
from Observer import Observer
from Enums import Monsters
class Home(Observer, Observable):
    """
    A home on the map. Observes its monsters (is notified when one is
    defeated) and is itself observable (notifies the game of defeats).
    """
    # Class-level defaults; each is shadowed by an instance attribute in __init__.
    monsters = []
    x_pos = 0
    y_pos = 0
    def __init__(self, x_pos, y_pos):
        """
        Create a home at (x_pos, y_pos) populated with 1-10 random monsters.
        """
        super().__init__()
        self.x_pos = x_pos
        self.y_pos = y_pos
        self.monsters = []
        # Create a random (1-10) number of monsters in the house
        self.gen_monsters(random.randint(1, 10))
    def sub(self, new_sub):
        """Subscribe the new sub to this home's notifications."""
        super().sub(new_sub)
    def gen_monsters(self, num_monsters):
        """
        Populate the house with num_monsters randomly chosen monsters.
        :param num_monsters: how many monsters to place inside the house
        """
        for x in range(num_monsters):
            # Monsters enum values 1-4 map to the four monster kinds;
            # each monster is constructed observing this home.
            monster_id = Monsters(random.randint(1, 4))
            if monster_id is Monsters.ZOMBIE:
                self.monsters.append(Zombie(self))
            elif monster_id is Monsters.VAMPIRE:
                self.monsters.append(Vampire(self))
            elif monster_id is Monsters.GHOUL:
                self.monsters.append(Ghoul(self))
            else:
                self.monsters.append(Werewolf(self))
    def get_monsters(self):
        """
        Return the (mutable) list of occupants of this home.
        """
        return self.monsters
    def get_x_pos(self):
        """
        Return the x position of the house (used for movement logic).
        """
        return self.x_pos
    def get_y_pos(self):
        """
        Return the y position of the house (used for movement logic).
        """
        return self.y_pos
    def update(self, *args, **kwargs):
        """
        Called when monsters die: each defeated monster passed in *args is
        replaced by a Person, then observers (the game) are told how many
        monsters were defeated so they can update their counts.
        :param args: monster instances that were killed
        """
        # Replace each defeated monster (matched by identity) with a Person
        defeated = 0
        for x in args:
            for y in self.get_monsters():
                if x is y:
                    self.monsters.remove(x)
                    self.monsters.append(Person(self))
                    defeated += 1
                    break
        # Let the game know how many monsters were defeated
        super().update_observers(defeated)
| [
"poiklasderf@gmail.com"
] | poiklasderf@gmail.com |
178a568eb15b8f1f6c119a46bb1e92c144ad41c9 | 2104433581437ff225d406d4677d3b21d901b084 | /import/sodexo_import.py | c591297511f1128caab912655c71e9812b4078a1 | [] | no_license | Podbi/accounting | fd49442a79caf4a82fbbf7638929b7ca84e5389a | f2b5670627ed056821de05b3ca40c24ac61f3ce3 | refs/heads/master | 2021-01-12T08:29:26.972803 | 2020-11-02T19:56:47 | 2020-11-02T19:56:47 | 76,596,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,809 | py | # -*- coding: utf-8 -*-
from datetime import datetime
import sys
import csv
import sqlite3
import re
# Database constants: currency id for CZK and the "valet" source id.
CURRENCY_CZK = 1
SOURCE_VALET = 1
from service.repository import Repository
from service.typeResolver import TypeResolver
# Usage: sodexo_import.py <tab-separated export file> <year>
if len(sys.argv) < 3:
    raise Exception('Málo vstupních argumentů. Zadej název souboru a rok')
date = ''
records = []
filepath = sys.argv[1]
year = sys.argv[2]
database = sqlite3.connect('../db.sqlite3')
resolver = TypeResolver(database)
repository = Repository(database)
print('Soubor',filepath,'bude otevřen a zpracován')
with open(filepath, 'r', encoding='utf-8', errors='replace') as file:
    reader = csv.reader(file, delimiter='\t')
    print('Soubor byl úspěšně otevřen, zahajuji zpracovávání textového souboru')
    for row in reader:
        # Skip rows that are too short to carry any transaction data
        if len(row) < 2:
            continue
        # Column 1 may carry a "DD.MM.YYYY HH:MM:SS" timestamp; remember the
        # most recent one (truncated to midnight) for the following records.
        if row[1] != '' and row[1] != None:
            match = re.search(r'([0-9]+)\.([0-9]+)\.', row[1])
            if match != None:
                date = datetime.strptime(row[1], '%d.%m.%Y %H:%M:%S')
                date = date.replace(hour=0, minute=0, second=0)
        if len(row) < 5:
            raise Exception('Řádek s popisem "{0}" nemá dostatek hodnot'.format(row[3]))
        # Column 0 holds the amount; keep the optional leading minus sign
        match = re.search(r'(\-)?[0-9]+', row[0])
        if match != None:
            money = match.group(0)
        else:
            raise Exception('Řádek s popisem "{0}" neobsahuje validní údaje o měně: "{1}"'.format(row[3], row[0]))
        description = 'Oběd (Stravování)'
        place = row[3].capitalize()
        # A positive amount with no place is the monthly voucher top-up
        if place == '' and int(money) > 0:
            description = 'Měsíční Stravenky'
            place = 'Dixons Carphone CoE'
        records.append({
            'date' : date,
            'money' : int(money),
            'currency' : CURRENCY_CZK,
            'description' : description,
            'place' : place,
            'type' : resolver.resolve('stravovani'),
            'source' : SOURCE_VALET
        })
print('')
print('Celkem bylo nalezeno',len(records),'záznamů, které budou vloženy do databáze')
print('Vkládám záznamy')
counter = 0
for record in records:
    # Skip records already present (same date/description/amount/currency/source)
    if (repository.hasRecord(record['date'], record['description'], record['money'], record['currency'], record['source'])):
        print('Záznam ze dne', record['date'], 'za', record['money'], 'již existuje.')
    else:
        repository.createRecord(
            record['date'],
            record['description'],
            record['place'],
            record['money'],
            record['currency'],
            record['source'],
            record['type']
        )
        counter = counter + 1
        print('.', end='')
# Single commit at the end keeps the import atomic-ish
repository.commit()
print('')
print(counter,'záznamů bylo úspěšně vloženo')
"podbi@centrum.cz"
] | podbi@centrum.cz |
fac506dcca52c0a95db2b6cf2410eac72f4596a2 | 2a900bc26b3562dac4e343730e749017afa1d4a7 | /setup.py | 52f4bb5b447e480e7e0f2c97b720887cd6a56bd5 | [] | no_license | SmallCell/fabric-expect | 95736e5e8df0aef83af87866ee01127844928358 | 54d75abdcb65ddf1f35f88e75e66332ec96f3e8e | refs/heads/master | 2016-08-03T15:01:35.400802 | 2014-04-08T14:01:24 | 2014-04-08T14:01:24 | 17,396,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,517 | py | from __future__ import print_function
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import io
import codecs
import os
import sys
import fabric.contrib.expect
version = '0.0.1'
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
    """Read each file and return their contents joined by kwargs['sep'].

    Defaults: encoding='utf-8', sep='\n'.
    """
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')

    def _slurp(name):
        with io.open(name, encoding=encoding) as handle:
            return handle.read()

    return sep.join(_slurp(name) for name in filenames)
# Long description for PyPI comes straight from the README.
long_description = read('README.rst')
# Custom "python setup.py test" command that delegates to pytest.
class PyTest(TestCommand):
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported here so pytest is only required when running tests
        import pytest
        errcode = pytest.main(self.test_args)
        sys.exit(errcode)
setup(
    name='fabric-expect',
    version=version,
    description='How to answer to prompts automatically with python fabric',
    long_description=long_description,
    classifiers=[
        'License :: OSI Approved :: BSD License',
        #'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.7',
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Programming Language :: Python",
        "Topic :: Software Development",
        "Topic :: Software Development :: Build Tools",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: System :: Clustering",
        "Topic :: System :: Software Distribution",
        "Topic :: System :: Systems Administration",
    ],
    author='ibnHatab',
    author_email='callistoiv+pypi@gmail.com',
    url='https://github.com/SmallCell/fabric-expect',
    license='BSD',
    cmdclass={'test': PyTest},
    test_suite='test.test_expect',
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    # Plug into fabric's namespace package so 'fabric.contrib.expect' imports
    namespace_packages=['fabric','fabric.contrib'],
    include_package_data=True,
    zip_safe=False,
    tests_require=['nose', 'mock', 'coverage'],
    install_requires=[
        # -*- Extra requirements: -*-
        'fabric',
    ],
    extras_require={
        'testing': ['pytest'],
    }
)
| [
"lib.aca55a@gmail.com"
] | lib.aca55a@gmail.com |
97c119e854804fc29fdc0b228df9eee8854e6983 | b0d8642517d423b59ccebb8983e265bfd35f87c6 | /modules.py | f4745c39ae5f9ad2cb8fba9bc7f8a89c9606dc77 | [
"MIT"
] | permissive | vuksamardzic/basic-py | 4016cf13f9efa225345f4547e59ecb0725b8d888 | 356766f895660376530a4734d751cea281d1920e | refs/heads/master | 2020-03-21T09:13:59.269764 | 2018-06-23T09:43:14 | 2018-06-23T09:43:14 | 138,389,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | import greet
from greet import say_hello
# Demonstrates both import styles: module-qualified and direct-name calls.
greet.say_hello('Vuk')
say_hello('Tim')
| [
"samardzic.vuk@gmail.com"
] | samardzic.vuk@gmail.com |
31d77e7d8179eb664dab536eb6a8bce353785b52 | 59bdad3fb33332b8d05f07107dcae45b6f9ba662 | /jornada/migrations/0001_initial.py | 500c98888d9f6de94ff035386c103dd9129bf53f | [] | no_license | dyronrh/hcmfront-for-test | 8bbad6ab75fd82fa84d75fa942e4d39033080934 | 911d97dffc3f788c0419233535a5e782cca776a5 | refs/heads/master | 2020-04-28T07:14:37.110512 | 2019-03-11T22:38:58 | 2019-03-11T22:38:58 | 175,084,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-08 22:47
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated by Django 1.11: creates the JornadaClass table whose 'tipo'
# field is a 6-char code chosen from the Chilean work-schedule types below.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='JornadaClass',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tipo', models.CharField(choices=[('jc', 'Jornada Completa'), ('jp', 'Jornada Parcial'), ('j180h', '180 horas mensuales'), ('j45h', '45 horas semanales'), ('j60h', '60 horas semanales'), ('jadt', 'Autorizada por la Dirección del Trabajo'), ('jb', 'Bisemanal'), ('jra', 'Registro de Asistencia')], max_length=6)),
            ],
        ),
    ]
| [
"dyronrh@yahoo.es"
] | dyronrh@yahoo.es |
bb7eb4a7d4345b902f1d0c5637b12669cb5d8e4d | 0c79382188946515059a26b636ea4689edc24faa | /feitos/sequencias2.py | e733a577f467ecd3345a8d3b57963444aa58533a | [] | no_license | flaviocardoso/uri204719 | cbd2b9cb5fbb183d891128c05eb63ee8e4cb78c0 | 9ec7a9c9e7bb788f61a80dce203b9a0aea9d93f3 | refs/heads/master | 2021-09-11T19:51:14.831377 | 2018-04-11T17:31:05 | 2018-04-11T17:31:05 | 113,496,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | #!/usr/bin/python3
#sequencias2.py 1156
# S = 1/2^0 + 3/2^1 + 5/2^2 + ... + 39/2^19: odd numerators over doubling
# denominators, summed in ascending order exactly like the original loop.
S = sum((2 * k + 1) / pow(2, k) for k in range(20))
print("{0:.2f}".format(S))
"flavioc401@gmail.com"
] | flavioc401@gmail.com |
8f58e730976b0a677b30bf61b7b99c9ee4cc60a3 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnaiddecamp.py | 19e17d66f304241ad72952f45ad26016da559867 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 139 | py | ii = [('MarrFDI3.py', 1), ('ClarGE2.py', 19), ('ClarGE.py', 2), ('WadeJEB.py', 1), ('SoutRD.py', 1), ('ThomGLG.py', 1), ('MackCNH2.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
da00b288e95f277ab14510e7ad75bc93260b4c12 | ccdedc53d4df116f3764c0127fcad773bfd9725f | /app/views/__init__.py | 7ca4f15b37f096ef91004a60b0f441f15454d0b6 | [] | no_license | artdokxxx/gcore | 7538b7aa048395880004ebc81bc6180e33295a47 | 9f1923ffe80d75a23f08138f9d02f3f84c22caed | refs/heads/master | 2020-07-05T16:35:27.296598 | 2019-08-16T09:43:29 | 2019-08-16T09:43:29 | 202,700,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | from .project_info import ProjectInfoView
| [
"artdokxxx@gmail.com"
] | artdokxxx@gmail.com |
46bc6096a386be5c695dcde7e327e88b94c36061 | d4a8e4a398ceef4cb2e952892824340a498929c6 | /main.py | 586a4fb05baf50974d3f847344bbc3a506142d4c | [] | no_license | RimveUnfinishedProjects/Chat4Hack | 0f490669b4d4d3c5cfa41c59b2bd652b4ef36696 | 32f127ece76f9814af3e711db961f8dffd475373 | refs/heads/master | 2022-04-02T21:49:04.300853 | 2019-12-23T00:38:15 | 2019-12-23T00:38:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,507 | py | from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from database import DataBase
from kivy.core.window import Window
import calendar
import time
from kivy.clock import Clock
Window.size = (800, 650)
class CreateAccountWindow(Screen):
    """Sign-up screen: validates the form and stores the new user via db."""
    namee = ObjectProperty(None)
    email = ObjectProperty(None)
    password = ObjectProperty(None)
    def submit(self):
        # Rudimentary validation: name and email filled, email has exactly
        # one '@' and at least one '.'.
        if self.namee.text != "" and self.email.text != "" and self.email.text.count("@") == 1\
                and self.email.text.count(".") > 0:
            # BUG FIX: the original compared the ObjectProperty object itself
            # ("self.password != ''"), which is always true, so blank
            # passwords were accepted; compare the entered text instead.
            if self.password.text != "":  # password not blank
                db.add_user(self.email.text, self.password.text, self.namee.text)
                self.reset()
                sm.current = "login"  # change window to login page
            else:
                invalid_form()
        else:
            invalid_form()
    def login(self):
        """Switch to the login screen, clearing the form first."""
        self.reset()
        sm.current = "login"
    def reset(self):
        """Clear all three input fields."""
        self.email.text = ""
        self.password.text = ""
        self.namee.text = ""
class LoginWindow(Screen):
    # Bound to the TextInput widgets declared in my.kv
    email = ObjectProperty(None)
    password = ObjectProperty(None)
    def login_btn(self):
        # Validate credentials against the local user database; on success
        # remember who logged in (MainWindow reads it in on_enter).
        if db.validate(self.email.text, self.password.text):
            MainWindow.current = self.email.text
            self.reset()
            sm.current = "main"
        else:
            invalid_login()
    def create_btn(self):
        # Jump to the account-creation screen
        self.reset()
        sm.current = "create"
    def reset(self):
        # Clear both input fields
        self.email.text = ""
        self.password.text = ""
    # Dead code below: an abandoned Firebase backend experiment kept as a bare
    # class-level string so it never executes. Left untouched.
    """
    url = 'https://chat4hack.firebaseio.com/.json'
    def post(self, JSON):
        to_database = json.loads(JSON)
        requests.patch(url=self.url, json=to_database)
    auth_key = "UnhYnU6mTj1XxiLFPWMy77fXrXgew281oOHcNS0E" # this is from firebase secret settings
    def get_data(self):
        request = requests.get(self.url + "?auth=" + self.auth_key)
        data = str(request.json())[12:-4]
        data = data.replace("'}, '", "\n")
        data = data.splitlines()
        users = {}
        for line in data: # getting all info about user from JSON structure and put into dict
            name = line.split("'")[0]
            password = line[::-1].split("'")[0]
            password = password[::-1]
            index = line.find("'") # index for finding starting place of creating date
            create_date = line[index+16:index+26]
            line = line[index+39:]
            email = line.split("'")[0]
            users[email] = (password, name, create_date)
    """
class MainWindow(Screen):
    """Account screen shown after a successful login."""
    n = ObjectProperty(None)
    created = ObjectProperty(None)
    email = ObjectProperty(None)
    clndr = ObjectProperty(None)
    localtime = ObjectProperty(None)
    # Email of the logged-in user; set by LoginWindow before switching here.
    current = ""
    @staticmethod
    def logout():
        sm.current = "login"
    def on_enter(self, *args):
        """Populate the account labels each time the screen becomes active."""
        password, name, created = db.get_user(self.current)
        # Show the calendar for the current month instead of the original
        # hard-coded calendar.month(2019, 11).
        now = time.localtime()
        clndr = calendar.month(now.tm_year, now.tm_mon)
        # Refresh the clock label once per second
        Clock.schedule_interval(self.update_time, 1)
        self.n.text = "Account name: " + name
        self.email.text = "Account email: " + self.current
        self.created.text = "Account Created On: " + created
        self.clndr.text = clndr
    def update_time(self, dt):
        self.localtime.text = time.strftime("Time: %H:%M:%S")
class WindowManager(ScreenManager):
    # Plain ScreenManager subclass, referenced by name from the .kv layout.
    pass
def invalid_login():
    # Modal popup shown when credentials do not validate
    pop = Popup(title='Invalid Login',
                content=Label(text='Invalid username or password.'),
                size_hint=(None, None), size=(400, 400))
    pop.open()
def invalid_form():
    # Modal popup shown when the sign-up form is incomplete or malformed
    pop = Popup(title='Invalid Form',
                content=Label(text='Please fill in all inputs with information.'),
                size_hint=(None, None), size=(400, 400))
    pop.open()
# Load the declarative UI, create the shared singletons, register all screens.
kv = Builder.load_file("my.kv")
sm = WindowManager()
db = DataBase("users.txt")
screens = [LoginWindow(name="login"), CreateAccountWindow(name="create"), MainWindow(name="main")]
for screen in screens:
    sm.add_widget(screen)
# The app starts on the login screen
sm.current = "login"
class Chat4HackApp(App):
    # Kivy derives the window title "Chat4Hack" from this class name.
    def build(self):
        self.icon = 'images/ico.ico'
        return sm
if __name__ == "__main__":
    Chat4HackApp().run()
| [
"noreply@github.com"
] | noreply@github.com |
c6ea130a24af604e6bb0ea95cbdbb09ad519387e | d567f92ff1524e67d4129f1a7e853b3427b79e87 | /test5.py | ea86ebd5383f5da58dfb9bd8d893565cb3c18f16 | [] | no_license | XinjieWen/test | 667f6580186fbae9385c9e386e48a9f6afce7ab6 | cd7d4162b5e373746f371ce25c71e7c4fab5198c | refs/heads/master | 2022-05-13T01:03:26.237326 | 2022-05-09T07:56:35 | 2022-05-09T07:58:24 | 134,082,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17 | py | print('hello5')
| [
"jasonxjwen@tencent.com"
] | jasonxjwen@tencent.com |
97fa49f015a6ef465e398e28ba407222aa6ef2cd | fed1dc97a37469e49c817d99124deec2dd1e8ff6 | /pythonSearch/requestsCSDN.py | bcc2086ad8b9864cb536fee7c4f221d5847c897e | [] | no_license | AngiesEmail/ARTS | 38b6e798730f8a7117a8df6ae3f89eadcf020911 | 6ccc54f25f7f3e0cb16a87cfd4ad577b5ba1f436 | refs/heads/master | 2020-03-26T03:57:21.447886 | 2019-10-07T07:44:15 | 2019-10-07T07:44:15 | 144,478,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | # -*- coding:utf-8 -*-
# 导入库
import requests
import os
import sys
from bs4 import BeautifulSoup
def getHTMLText(url):
    """Fetch a CSDN article page and save its text as '<title>.md' under sys.argv[1].

    Keeps the original contract: returns the Chinese error string on any
    failure, returns None on success.
    """
    try:
        r = requests.get(url)
        r.raise_for_status()
        # Let requests guess the real encoding from the body
        r.encoding = r.apparent_encoding
        demo = r.text
        soup = BeautifulSoup(demo,'html.parser')
        result = soup.article.get_text()
        title = soup.title.get_text()
        path = sys.argv[1]+'/'+title+'.md'
        # Context manager guarantees the file is closed even if the write fails
        with open(path, 'wb') as fileObject:
            fileObject.write(result.encode('utf-8'))
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate
        return "产生异常"
if __name__=='__main__':
    # argv[1] = output directory, argv[2] = article URL
    url = sys.argv[2]
    getHTMLText(url)
"yuanfangrui@topjoy.com"
] | yuanfangrui@topjoy.com |
93149bb3a6b9892081504d75a719a82d1a7fa2e1 | f0a44b63a385e1c0f1f5a15160b446c2a2ddd6fc | /examples/transform_cube.py | f9f45274bed9265c28b79a03dfd4e3ccccfa5ad1 | [
"MIT"
] | permissive | triroakenshield/ezdxf | 5652326710f2a24652605cdeae9dd6fc58e4f2eb | 82e964a574bcb86febc677bd63f1626318f51caf | refs/heads/master | 2023-08-17T12:17:02.583094 | 2021-10-09T08:23:36 | 2021-10-09T08:23:36 | 415,426,069 | 1 | 0 | MIT | 2021-10-09T21:31:25 | 2021-10-09T21:31:25 | null | UTF-8 | Python | false | false | 1,407 | py | # Copyright (c) 2020-2021 Manfred Moitzi
# License: MIT License
from pathlib import Path
import math
import ezdxf
from ezdxf import zoom
from ezdxf.math import UCS
DIR = Path("~/Desktop/Outbox").expanduser()
p = [
(0, 0, 0),
(1, 0, 0),
(1, 1, 0),
(0, 1, 0),
(0, 0, 1),
(1, 0, 1),
(1, 1, 1),
(0, 1, 1),
]
doc = ezdxf.new()
msp = doc.modelspace()
block = doc.blocks.new("block_4m3")
cube = block.add_mesh()
with cube.edit_data() as mesh_data:
mesh_data.add_face([p[0], p[1], p[2], p[3]])
mesh_data.add_face([p[4], p[5], p[6], p[7]])
mesh_data.add_face([p[0], p[1], p[5], p[4]])
mesh_data.add_face([p[1], p[2], p[6], p[5]])
mesh_data.add_face([p[3], p[2], p[6], p[7]])
mesh_data.add_face([p[0], p[3], p[7], p[4]])
mesh_data.optimize()
# Place untransformed cube, don't use the rotation
# attribute unless you really need it, just
# transform the UCS.
blockref = msp.add_blockref(name="block_4m3", insert=(0, 0, 0))
# First rotation about the local x-axis
ucs = UCS().rotate_local_x(angle=math.radians(45))
# same as a rotation around the WCS x-axis:
# ucs = UCS().rotate(axis=(1, 0, 0), angle=math.radians(45))
# Second rotation about the WCS z-axis
ucs = ucs.rotate(axis=(0, 0, 1), angle=math.radians(45))
# Last step transform block reference from UCS to WCS
blockref.transform(ucs.matrix)
zoom.extents(msp)
doc.saveas(DIR / "cube.dxf")
| [
"me@mozman.at"
] | me@mozman.at |
5482f68ce92c5b80a7fc402e4fa7bc06151e25cc | f250ec4dd1b49750f5b42d577cf42b47c929d7bc | /python_django/dojo_secrets_2/apps/secrets_app/views.py | 59ef738b1e2532dcb2d144c4703af0437a352c37 | [] | no_license | charlie320/programming_labs_projects | 2204607109d49b2a5dd738cd067b073aef44e0b0 | 33b32a6ff83ddb1e7ef832eb8286e42d3a6eca12 | refs/heads/master | 2021-09-05T11:21:29.622439 | 2018-01-26T20:54:56 | 2018-01-26T20:54:56 | 106,962,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | from django.shortcuts import render, redirect, reverse
from django.db.models import Count
from ..login_app.models import User
from .models import Secret
# Create your views here.
def index(request):
    # Session-based current user via the login app's custom manager
    current_user = User.objects.currentUser(request)
    # Annotate each secret with its like count for display in the template
    secrets = Secret.objects.annotate(num_likes=Count('liked_by'))
    context = {
        'user' : current_user,
        'secrets' : secrets,
    }
    return render(request, 'secrets_app/index.html', context)
def create(request):
    """Create a secret from non-empty POST content, then always redirect.

    BUG FIX: the original only returned inside the POST branch, so GET
    requests made the view return None and Django raised
    "The view didn't return an HttpResponse object".
    """
    if request.method == "POST" and len(request.POST['content']) != 0:
        current_user = User.objects.currentUser(request)
        Secret.objects.createSecret(request.POST, current_user)
    return redirect(reverse('success'))
def like(request, id):
current_user = User.objects.currentUser(request)
secret = Secret.objects.get(id=id)
current_user.likes.add(secret)
return redirect(reverse('success'))
def unlike(request, id):
current_user = User.objects.currentUser(request)
secret = Secret.objects.get(id=id)
current_user.likes.remove(secret)
return redirect(reverse('success'))
def delete(request, id):
if request.method == "POST":
secret = Secret.objects.get(id=id)
current_user = User.objects.currentUser(request)
if current_user.id == secret.user.id:
secret.delete()
return redirect(reverse('success'))
| [
"Charlie@CharlieulieiMac.home"
] | Charlie@CharlieulieiMac.home |
40c80298125d22d148038ffefb051f1a267a1a50 | 6e3b8a04a074c30cf4fc43abe7a208f772df795b | /Mid-Exam/2-task.py | 58c67fe8bae90b41f94cde4ab24bb1499bf056e6 | [] | no_license | majurski/Softuni_Fundamentals | dc0808fdaab942896eebfb208fb6b291df797752 | bf53a9efdcb45eb911624ab86d762a6281391fb8 | refs/heads/master | 2022-11-29T06:06:06.287984 | 2020-08-10T19:36:18 | 2020-08-10T19:36:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | arr = input().split()
new_arr = list(map(int, arr))
result = []
line = input()
while line != "end":
value = line.split()
command = value[0]
if command == "swap":
index_1 = int(value[1])
index_2 = int(value[2])
new_arr[index_1], new_arr[index_2] = new_arr[index_2], new_arr[index_1]
elif command == "multiply":
index_1 = int(value[1])
index_2 = int(value[2])
multiplied = new_arr[index_1] * new_arr[index_2]
new_arr[index_1] = multiplied
elif command == "decrease":
for val in new_arr:
val -= 1
result.append(val)
line = input()
print(", ".join(list(map(str, result))))
# print(', '.join([str(x) for x in last]))
| [
"noreply@github.com"
] | noreply@github.com |
8ddd904d104bd85b3787c7f127714834d0501a7f | 042620764a26feea1dc8fd6d040dc48046767373 | /app/app/__init__.py | f1e165aa18f436b99ff63069f7a469a38559a4c4 | [] | no_license | gwjrl520/FlaskProjets | ab29894d1920ce0015efcf9d003a336626e3fb69 | c8a1d346a9d3f6e85a754cfcc7f857310d49050d | refs/heads/main | 2023-05-03T05:35:26.808551 | 2021-05-24T08:44:17 | 2021-05-24T08:44:17 | 369,785,691 | 0 | 1 | null | 2021-05-24T08:44:18 | 2021-05-22T11:03:42 | Python | UTF-8 | Python | false | false | 1,299 | py | from flask import Flask
from commons.settings.extensions import apispec
from commons.settings.extensions import db
from commons.settings.extensions import migrate
from commons.settings.config import config_map
from app.resources import register_blueprints
def create_app(config_name):
"""Application factory, used to create application"""
app = Flask(__name__, static_folder="../static", template_folder="..")
config_class = config_map.get(config_name)
app.config.from_object(config_class)
configure_extensions(app)
configure_apispec(app)
register_blueprints(app)
return app
def configure_extensions(app):
"""configure flask extensions"""
db.init_app(app)
migrate.init_app(app, db)
def configure_apispec(app):
"""Configure APISpec for swagger support"""
apispec.init_app(app, security=[{"jwt": []}])
apispec.spec.components.security_scheme(
"jwt", {"type": "http", "scheme": "bearer", "bearerFormat": "JWT"}
)
apispec.spec.components.schema(
"PaginatedResult",
{
"properties": {
"total": {"type": "integer"},
"pages": {"type": "integer"},
"next": {"type": "string"},
"prev": {"type": "string"},
}
},
)
| [
"g715450338@163.com"
] | g715450338@163.com |
a276018235cdff655b2d6d73f4a8ccf27c718a3e | 069f1daa989cd2796bdf07eae7716a7bd2267476 | /15/lattice_paths.py | 316f57ad55a0d26e9466cb8cc153f0891d0bdd9c | [] | no_license | samescolas/project-euler | 019503abb4ef5073a1bad38dbcc07aaf954615e0 | ddde07e998e5f3d911c30fb5cf63e3d76501e34a | refs/heads/master | 2020-12-02T06:18:40.897650 | 2017-12-10T22:43:41 | 2017-12-10T22:43:41 | 96,815,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | #!/usr/bin/python
import sys
from math import pow
def factorial(n):
if n < 1:
return 1
return n * factorial(n - 1)
size = 20 if len(sys.argv) == 1 else int(sys.argv[1])
print("Number of paths on {}x{} grid: {}".format(size, size,
int(factorial(2*size) / pow(factorial(size), 2))))
| [
"samescolas@gmail.com"
] | samescolas@gmail.com |
611a198b6cec04fdddfb406e56a5c9bfb18e4eed | 3402ee0e96b9d1d2ae00cb3a0f001d399411f634 | /Server/data_layer/data_layer.py | ab70ff6a55f260a56fd11e102856e4f1517c7c66 | [
"MIT"
] | permissive | kerenren/Turbo-Kanban | d5854464e30014b1c7e5d2748734cb73bfaaf87f | 1a15382b33336f9a86a4b4b147aad12ab0b05265 | refs/heads/master | 2023-02-07T22:08:52.540796 | 2020-12-28T14:01:18 | 2020-12-28T14:01:18 | 319,048,893 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | #!/bin/python3
import os
def get_dir_info_by_path(path):
result_files = []
walk_obj = os.walk(path)
for items in walk_obj:
root = items[0]
files = items[2]
for child_file in files:
file_path = os.path.join(root, child_file)
file_statinfo = os.stat(file_path)
last_modified_sec = file_statinfo.st_mtime
size = file_statinfo.st_size
result_files.append({'file_path': file_path, 'last_modified_sec': last_modified_sec, 'size': size})
return result_files
def validate_dir(path):
if not os.path.exists(path):
os.makedirs(path)
return True
return False
| [
"renkelei.kelly@gmail.com"
] | renkelei.kelly@gmail.com |
4c4458a3f8cea7e2f3c299b6152409241cc9459f | f320dfb79830a3dc52dd674b16cb319223ee9b47 | /user_profiles/migrations/0002_auto_20191026_2242.py | f2c01999c1a380552b3d90250b1d2bd981efebe6 | [] | no_license | yk220284/django | 91bfcf4fe238d49c81ac46323988f501a4017e9f | 046af69a19a11804fa5b5ed2a801fab3af21bca8 | refs/heads/master | 2020-08-28T18:42:12.484929 | 2019-10-27T01:24:31 | 2019-10-27T01:24:31 | 217,788,582 | 0 | 0 | null | 2019-10-27T01:24:33 | 2019-10-27T01:05:09 | Python | UTF-8 | Python | false | false | 668 | py | # Generated by Django 2.2.6 on 2019-10-26 22:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profiles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='student',
name='college',
field=models.CharField(default='no college provided', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='student',
name='subject',
field=models.CharField(default='no subject provided', max_length=50),
preserve_default=False,
),
]
| [
"Yankang@MacdeMacBook-Air.local"
] | Yankang@MacdeMacBook-Air.local |
72791e17e71456aade20cc9cc4e32de6523e144b | 34f5146e25144d4ceced8af38b5de2f8fff53fdd | /ui/mainwindow.py | 158a3002e7c464033c18697b89cd33491f8128a1 | [] | no_license | fadiga/mstock | 3271eeb0b8339b27347bbb70b96bc1f161ed6901 | a5f621ed58bd881d9a232498ef23762a5f9c186f | refs/heads/master | 2021-05-25T11:56:28.430965 | 2017-09-25T19:08:27 | 2017-09-25T19:08:27 | 39,653,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | #!/usr/bin/env python
# -*- coding: utf8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
# maintainer: Fad
from __future__ import (
unicode_literals, absolute_import, division, print_function)
from PyQt4.QtGui import QIcon
from PyQt4.QtCore import Qt
from Common.ui.common import FMainWindow, QToolBadgeButton
from ui.menutoolbar import MenuToolBar
from ui.menubar import MenuBar
from Common.ui.statusbar import GStatusBar
from ui.dashboard import DashbordViewWidget
from configuration import Config
class MainWindow(FMainWindow):
def __init__(self):
FMainWindow.__init__(self)
self.setWindowIcon(QIcon.fromTheme(
'logo', QIcon(u"{}".format(Config.APP_LOGO))))
self.menubar = MenuBar(self)
self.setMenuBar(self.menubar)
self.toolbar = MenuToolBar(self)
self.addToolBar(Qt.LeftToolBarArea, self.toolbar)
self.statusbar = GStatusBar(self)
self.setStatusBar(self.statusbar)
self.page = DashbordViewWidget
self.change_context(self.page)
def page_width(self):
return self.width() - 100
def add_badge(self, msg, count):
b = QToolBadgeButton(self)
b.setText(msg)
b.setCounter(count)
self.toolbar.addWidget(b)
def exit(self):
self.logout()
self.close()
def active_menu(self):
self.menubar = MenuBar(self)
self.setMenuBar(self.menubar)
| [
"ibfadiga@gmail.com"
] | ibfadiga@gmail.com |
a8a654e2f4bef5e6c0b3f42a9577d868d6002d9e | c893333a4fd8b15c9e3c8d7d6cc0ec375cc11bb7 | /nuke/fxpipenukescripts/archive.py | 13fcd78d5b2b32eabb19bf536cbe8e7ca0cd8dbb | [] | no_license | khpang/vfxpipe | 63af3545157897afa8eb307984056f9c916017e8 | fde184a8f4cb5b4c98108bb1b2f2e8fd92cbafeb | refs/heads/master | 2021-01-24T01:21:02.329898 | 2013-01-30T02:35:22 | 2013-01-30T02:35:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,242 | py | import shutil
import threading
import time
import nukescripts
import nuke
import os
import datetime
shotData = True
class archiveInterface():
def interface(self):
# set up the new script name
scriptName = os.path.basename(nuke.value('root.name'))
date = datetime.date.today()
formattedDate = '%s%02d%02d' % (date.year, int(date.month), int(date.day))
archivePath = 'z:/job/after_earth/prod/io/archive/%s/%s/' % (formattedDate, scriptName.replace('.nk',''))
self.panel = nukescripts.PythonPanel('Archive script 1.01')
self.file = nuke.File_Knob('Output','Output folder:')
self.file.setValue(archivePath)
self.panel.addKnob(self.file)
self.scriptName = nuke.String_Knob('name','Script name:',scriptName)
self.panel.addKnob(self.scriptName)
self.log = nuke.Boolean_Knob('log','Generate log:',True)
self.panel.addKnob(self.log)
self.comment = nuke.Multiline_Eval_String_Knob('comment','Comments:')
self.panel.addKnob(self.comment)
result = self.panel.showModalDialog()
self.scriptInfo = nukescripts.get_script_data()
if result:
self.convertGizmosToGroups()
self.action()
def action(self):
readList = []
writeList= []
fbxList= []
if os.path.exists(self.file.value() + '/') == False:
os.makedirs(self.file.value())
nuke.scriptSaveAs(self.file.value() + '/' + self.scriptName.value())
readToCopy= []
writeToCopy= []
self.scriptRoot = '''[file dirname [knob root.name]]'''
DESTINATION = self.file.value()
LAYERS = DESTINATION + 'LAYERS/'
FBX = DESTINATION + 'GEO/'
WRITE = DESTINATION + 'WRITE/'
# Read
for n in nuke.allNodes('Read'):
if n.knob('file').value() not in readList:
if n.knob('disable').value() == False:
readList.append(nuke.filenameFilter(n.knob('file').value()))
for p in readList:
if os.path.exists(os.path.dirname(p)):
for f in os.listdir(os.path.dirname(p)):
if os.path.splitext(f)[-1] == os.path.splitext(p)[-1]:
if len(f.split('.')[0]) == len(os.path.basename(p).split('.')[0]):
path = '/'.join([os.path.dirname(p),os.path.basename(f)])
if os.path.isfile(path):
readToCopy.append(path)
#FBX
for n in nuke.allNodes():
if n.Class() in ['ReadGeo2','Camera2','Axis2','WriteGeo']:
if n.knob('file').value():
if n.knob('file').value() not in fbxList:
if n.knob('disable').value() == False:
fbxList.append(nuke.filenameFilter(n.knob('file').value()))
#Write
'''
for n in nuke.allNodes('Write'):
if n.knob('file').value() not in writeList:
if n.knob('disable').value() == False:
if n.knob('file').value() != '':
if os.path.isdir( os.path.dirname( n.knob('file').value() ) ):
writeList.append(nuke.filenameFilter(n.knob('file').value()))
'''
for p in writeList:
if os.path.exists(os.path.dirname(p)):
for f in os.listdir(os.path.dirname(p)):
if os.path.splitext(f)[-1] == os.path.splitext(p)[-1]:
if f.split('.')[0] == os.path.basename(p).split('.')[0]:
path = '/'.join([os.path.dirname(p),os.path.basename(f)])
if os.path.isfile(path):
writeToCopy.append(path)
self.copyDic = {}
for p in readToCopy:
folder = os.path.dirname(p).split('/')[-1] + '/'
if os.path.exists(LAYERS + folder) == False:
os.makedirs(LAYERS + folder)
self.copyDic[p] = [LAYERS + folder + os.path.basename(p),os.path.getsize(p)]
for p in fbxList:
if os.path.exists(FBX) == False:
os.makedirs(FBX)
#shutil.copy( p , FBX + os.path.basename(p) )
self.copyDic[p] = [FBX + os.path.basename(p),os.path.getsize(p)]
for p in writeToCopy:
folder = os.path.dirname(p).split('/')[-1] + '/'
if os.path.exists(WRITE + folder) == False:
os.makedirs(WRITE + folder)
#shutil.copy( p , WRITE + folder + os.path.basename(p) )
self.copyDic[p] = [WRITE + folder + os.path.basename(p),os.path.getsize(p)]
threading.Thread( None, self.action2 ).start()
def action2(self):
task = nuke.ProgressTask("Copying")
task.setMessage('fsdf')
lenght = len(self.copyDic)
x = 0.0
totalSize = 0.0
for k,v in self.copyDic.iteritems():
totalSize+= v[1]
totalSize = round((totalSize/1000000000),2)
toGoSize = 0.0
myList = []
for i in self.copyDic:
myList.append(i)
myList.sort()
for i in myList:
p = int((x/lenght)*100)
task.setProgress(p)
toGoSize = toGoSize + self.copyDic[i][1]
progressStr = ' (%s/%s)' % (int(x),lenght)
size = ' '+str(round((toGoSize/1000000000),2))+' / ' +str(totalSize) +' GB'
task.setMessage(os.path.basename(i) + progressStr +size)
shutil.copy( i,self.copyDic[i][0])
x+=1
if task.isCancelled():
nuke.executeInMainThread( nuke.message, args=( "Canceled" ) )
break
self.replacePath()
def replacePath(self):
for n in nuke.allNodes():
if n.Class() in ['ReadGeo2','Camera2','Axis2','WriteGeo']:
a = n.knob('file').value()
a = a.replace( os.path.dirname(a) , self.scriptRoot+'/GEO')
n.knob('file').setValue(a)
for n in nuke.allNodes('Read'):
a = n.knob('file').value()
a = a.replace( '/'.join(os.path.dirname(a).split('/')[0:-1]) , self.scriptRoot+'/LAYERS')
n.knob('file').setValue(a)
for n in nuke.allNodes('Write'):
a = n.knob('file').value()
a = a.replace( '/'.join(os.path.dirname(a).split('/')[0:-1]) , self.scriptRoot+'/WRITE')
n.knob('file').setValue(a)
nuke.scriptSave("")
if self.log.value():
self.generateLog()
def generateLog(self):
if self.comment.value() != '':
note = 'Notes\n\n' + self.comment.value() + '\n\n===================\n\n'
else:
note = self.comment.value()
nodeInfo = ''
a = {}
b = []
c = []
for n in nuke.allNodes():
a[n.Class()] = 0
for n in nuke.allNodes():
c.append(n.Class())
for i in a:
b.append(i)
b.sort()
for i in c:
a[i] +=1
for i in b:
nodeInfo = nodeInfo + '('+str(a[i])+')' + ' ' + i +'\n'
stats = nukescripts.get_script_data()
logFile = open(self.file.value()+ 'log.txt','w')
logFile.write(note+nodeInfo+'\n\n\n\n'+stats)
logFile.close()
def convertGizmosToGroups(self):
###Node Selections
nodeSelection = nuke.allNodes()
noGizmoSelection = []
gizmoSelection = []
for n in nodeSelection:
if 'gizmo_file' in n.knobs():
gizmoSelection.append(n)
else:
noGizmoSelection.append(n)
groupSelection = []
for n in gizmoSelection:
bypassGroup = False
###Current Status Variables
nodeName = n.knob('name').value()
nodeXPosition = n['xpos'].value()
nodeYPosition = n['ypos'].value()
nodeHideInput = n.knob('hide_input').value()
nodeCached = n.knob('cached').value()
nodePostageStamp = n.knob('postage_stamp').value()
nodeDisable = n.knob('disable').value()
nodeDopeSheet = n.knob('dope_sheet').value()
nodeDependencies = n.dependencies()
nodeMaxInputs = n.maxInputs()
inputsList = []
###Current Node Isolate Selection
for i in nodeSelection:
i.knob('selected').setValue(False)
n.knob('selected').setValue(True)
nuke.tcl('copy_gizmo_to_group [selected_node]')
###Refresh selections
groupSelection.append(nuke.selectedNode())
newGroup = nuke.selectedNode()
###Paste Attributes
newGroup.knob('xpos').setValue(nodeXPosition)
newGroup.knob('ypos').setValue(nodeYPosition)
newGroup.knob('hide_input').setValue(nodeHideInput)
newGroup.knob('cached').setValue(nodeCached)
newGroup.knob('postage_stamp').setValue(nodePostageStamp)
newGroup.knob('disable').setValue(nodeDisable)
newGroup.knob('dope_sheet').setValue(nodeDopeSheet)
###Connect Inputs
for f in range(0, nodeMaxInputs):
inputsList.append(n.input(f))
for num, r in enumerate(inputsList):
newGroup.setInput(num, None)
for num, s in enumerate(inputsList):
newGroup.setInput(num, s)
n.knob('name').setValue('temp__'+nodeName+'__temp')
newGroup.knob('name').setValue(nodeName)
newGroup.knob('selected').setValue(False)
###Cleanup (remove gizmos, leave groups)
for y in gizmoSelection:
y.knob('selected').setValue(True)
nukescripts.node_delete(popupOnError=False)
for z in groupSelection:
z.knob('selected').setValue(True)
for w in noGizmoSelection:
w.knob('selected').setValue(True)
ai = archiveInterface()
| [
"throb@throb.net"
] | throb@throb.net |
7ea19293b8c1d06c2af7679ddb75fb3ef9ba13b1 | dcbb43c577fdd963638fdeb3d1042912725f5e1d | /venv/Scripts/pip3-script.py | f1fcd00d229b401b3d22581a13ab71f40c18657d | [] | no_license | ag5300cm/SaumPi2 | 1ac42b1bdcc3d36df8473936817554830f1b094b | 6f7528109ef5e15a70c5b708dba630cc07c9f511 | refs/heads/master | 2020-03-28T11:15:42.453172 | 2018-09-12T21:30:12 | 2018-09-12T21:30:12 | 148,194,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!C:\Users\Benjamin\PycharmProjects\SaumPi\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
| [
"bhillenbrand101@gmail.com"
] | bhillenbrand101@gmail.com |
af5136259e7632ef6c77f2dedcef1585efd50be9 | ce93c4d770792714810127e337f6a56ddce1aed2 | /plugins/gedit2/fuzzyopen/fuzzyopen/suggestion.py | 77982e7ffcbfd43d1bdb16abb5d25bf4e7bc0a95 | [] | no_license | mereghost/gmate | bb8f7bce3f48bb96489babac6410024cd5673d6f | a986da119eb153e76578539f2dbcad6738f1a92a | refs/heads/master | 2021-01-16T20:47:45.944601 | 2012-01-26T19:30:12 | 2012-01-26T19:30:12 | 1,666,484 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,168 | py | """
Class for suggestions
"""
import os
import subprocess
import gio, gtk
from util import debug
import util
max_result = 15
class FuzzySuggestion:
def __init__( self, filepath, show_hidden=False, git=False ):
self._filepath = filepath
self._show_hidden = show_hidden
self._git = git and util.config('use_git')
self._excluded = util.config('ignore_ext').split(',')
self._ignore_case = util.config('ignore_case')
self._ignore_space = util.config('ignore_space')
if self._git:
self._load_git()
self._load_file()
def _load_file( self ):
self._fileset = []
for dirname, dirnames, filenames in os.walk( self._filepath ):
if not self._show_hidden:
for d in dirnames[:]:
if d[0] == '.':
dirnames.remove(d)
path = os.path.relpath( dirname, self._filepath )
for filename in filenames:
if (self._show_hidden or filename[0] != '.'):
if os.path.splitext( filename )[-1][1:] not in self._excluded:
self._fileset.append( os.path.normpath(os.path.join( path, filename ) ) )
self._fileset = sorted( self._fileset )
debug("Loaded files count = %d" % len(self._fileset))
def _load_git( self ):
self._git_with_diff = subprocess.Popen(["git", "diff", "--numstat", "--relative"], cwd=self._filepath, stdout=subprocess.PIPE).communicate()[0].split('\n')[:-1]
debug("Git file path: %s" % self._filepath)
self._git_with_diff = [ s.strip().split('\t') for s in self._git_with_diff ]
self._git_files = [ s[2] for s in self._git_with_diff ]
def suggest( self, sub ):
if self._ignore_space:
sub = sub.replace(' ', '')
suggestion = []
for f in self._fileset:
highlight, score = self._match_score( sub, f )
if score >= len(sub):
suggestion.append((highlight, f, score))
suggestion = sorted(suggestion, key=lambda x: x[2], reverse=True)[:max_result]
debug("Suggestion count = %d" % len(suggestion))
return [ self._metadata(s) for s in suggestion ]
def _metadata( self, suggestion ):
target = os.path.join(self._filepath, suggestion[1])
time_string = util.relative_time(os.stat(target).st_mtime)
highlight = "<span size='x-large'>" + suggestion[0] + "</span>\n" + self._token_string( suggestion[1] ) + "MODIFY " + time_string
if self._git and (suggestion[1] in self._git_files):
index = self._git_files.index(suggestion[1])
highlight += self._git_string(index)
file_icon = gio.File(os.path.join(self._filepath, suggestion[1])).query_info('standard::icon').get_icon()
icon = gtk.icon_theme_get_default().lookup_by_gicon(file_icon, 40, gtk.ICON_LOOKUP_USE_BUILTIN)
return (icon and icon.load_icon(), highlight, suggestion[1])
def _token_string( self, file ):
token = os.path.splitext(file)[-1]
if token != '':
token = token[1:]
else:
token = '.'
return "<span variant='smallcaps' foreground='#FFFFFF' background='#B2B2B2'><b> " + token.upper() + ' </b></span> '
def _git_string( self, line_id ):
add = int(self._git_with_diff[line_id][0])
delete = int(self._git_with_diff[line_id][1])
if add != 0 or delete != 0:
return " GIT <tt><span foreground='green'>" + ('+' * add) + "</span><span foreground='red'>" + ('-' * delete) + "</span></tt>"
else:
return ""
def _match_score( self, sub, str ):
result, score, pos, git, orig_length, highlight = 0, 0, 0, 0, len(str), ''
for c in sub:
while str != '' and not self._match(str[0], c):
score = 0
highlight += str[0]
str = str[1:]
if str == '':
return (highlight, 0)
score += 1
result += score
pos += len(str)
highlight += "<b>" + str[0] + "</b>"
str = str[1:]
highlight += str
if len(sub) != 0 and orig_length > 1:
pos = float(pos-1) / ((float(orig_length)-1.0) * float(len(sub)))
else:
pos = 0.0
if self._git and (str in self._git_files):
git = 1
return (highlight, float(result) + pos + git)
def _match(self, a, b):
if self._ignore_case:
return a.lower() == b.lower()
else:
return a == b
| [
"andrewliu33@gmail.com"
] | andrewliu33@gmail.com |
6ab0738ddfb5846b6196cc354c896871968b71f9 | eb84ba23b8412ba4071c86b1e32dd28f39d423d2 | /mugen-backend/Mugen/wsgi.py | 55b49e2e9f63651c671f185545168b06b1f039e5 | [] | no_license | tsukuyomi3/Mugen-Player | 5b6135a81aa33a6f1983011301ddf9dbd504d14d | b61de1fc11f46c3284feac2a7ce0228038669b2f | refs/heads/main | 2021-06-05T04:41:20.410202 | 2020-10-06T11:42:54 | 2020-10-06T11:42:54 | 101,651,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for Mugen project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Mugen.settings')
application = get_wsgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
9749962025eab7f8672f220e73430cd6450798d6 | 373102b585532c54f24df422521638f81761d26d | /refugio/forms.py | 7cf39b61ed2c3fe274718e3eb2a51fee1864dfb7 | [] | no_license | programacionparaaprender/refugio2 | d58beb92b23df537dacbdecea5ace30072256646 | f4f3874e1cf1b4493ebe4ed1ffd4ac268239a54a | refs/heads/master | 2022-11-12T22:37:13.861491 | 2020-07-05T22:04:55 | 2020-07-05T22:04:55 | 277,390,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | from django import forms
from django.contrib.auth.models import User
""" class PasswordForm(forms.ModelForm):
class Meta:
model = Mascota
fields = [
'nombre',
'sexo',
'edad_aproximada',
'fecha_rescate',
'persona',
'vacuna',
]
labels = {
'nombre':'Nombre',
'sexo':'Sexo',
'edad_aproximada':'edad aproximada',
'fecha_rescate':'fecha de rescate',
'persona':'Adoptante',
'vacuna':'Vacunas',
}
widgets = {
'nombre': forms.TextInput(attrs={'class':'form-control'}),
'sexo': forms.TextInput(attrs={'class':'form-control'}),
'edad_aproximada': forms.TextInput(attrs={'class':'form-control'}),
'fecha_rescate': forms.TextInput(attrs={'class':'form-control'}),
'persona':forms.Select(attrs={'class':'form-control'}),
'vacuna':forms.CheckboxSelectMultiple(),
} """ | [
"yancel209@gmail.com"
] | yancel209@gmail.com |
1e97caa9740ddd276af8721952d53c64e6237066 | de8b832a3c804837300b9974dc0151d9294fa573 | /code/experiment/GenderSoundNet/ex18_1_1_1_1_1_1_1_1_1_1_1_1_1_1/genderSoundNet.py | d318188bf1b8d63c08f77854ab0b089a4eff19a9 | [] | no_license | YuanGongND/Deep_Speech_Visualization | fcff2ac93e5adffd707b98eb7591f50fe77c1274 | 73a79e3596d9a5ee338eafb9a87b227696de25d1 | refs/heads/master | 2021-07-19T23:00:36.294817 | 2017-10-28T01:04:59 | 2017-10-28T01:04:59 | 105,332,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,377 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 26 21:03:04 2017
Conduct erxperiment on IEMOCAP, three labels:
96001: emotion(0-4, 5 = other emotions)
96002: speaker(0-9)
96003: gender(male=0, female=1)
@author: Kyle
"""
import os
from sys import argv
_, newFolderName, gpuI = argv
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpuI)
import sys
sys.path.append("../../model/")
import soundNet
import waveCNN
sys.path.append("../")
import expUtil
import numpy as np
from keras import backend as K
import matplotlib.pyplot as plt
import shutil
#%% creat folder to save model, the code, and model configuration
while os.path.isdir( newFolderName ):
newFolderName = newFolderName + '_1'
print( 'exist' )
os.mkdir( newFolderName )
shutil.copy( os.path.basename(__file__), newFolderName ) # copy this file to the new folder
shutil.copy( '../../model/soundNet.py', newFolderName )
shutil.copy( '../../model/waveCNN.py', newFolderName )
shutil.copy( '../expUtil.py', newFolderName )
# put all configuratation here
thisTask = 'gender'
dataType = 'toyWaveform'
# define the model
model = soundNet.soundNet # define the model
#model = waveCNN.waveCNN
# according to the configuaration, change the coresponding setting
#if thisTask == 'emotion':
# trainNewFolderName = newFolderName
# load data
trainFeature, trainLabel, testFeature, testLabel = expUtil.loadData( testFolder = 4, testTask = thisTask, precision = 'original', sampleRate = 16000, dataType = dataType )
#%% grid search
#batch_sizeList = [ 32, 24, 16 ]
#learningRateList = [ 1e-3, 5e-4, 1e-4, 5e-5, 1e-5 ]
#initList = [ 'RandomUniform', 'lecun_normal', 'lecun_uniform', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform' ]
batch_sizeList = [ 32 ]
learningRateList = [ 1e-4 ]
initList = [ 'glorot_normal' ]
for batch_size in batch_sizeList:
resultList = [ ]
for learningRate in learningRateList:
for init in initList:
tempFolderName = newFolderName + '/' + str( learningRate ) + '_' + str( batch_size ) + '_' + init
os.mkdir( tempFolderName )
# train the model
resultOnTrain, resultOnTest = expUtil.train( testFeature, testLabel, trainFeature, trainLabel, iteration_num = 100, \
lr_decay = 0.1, batch_size = batch_size, learningRate = learningRate, iterationNum = 100, \
modelT = model, newFolderName = tempFolderName, init = keras.initializers.Constant(value=0.01), saveSign = True, denseUnitNum = 64, \
dataType = dataType )
resultList.append( resultOnTest[ -1 ] )
np.savetxt( newFolderName + '\_' + str( batch_size ) +'_gridSearch.csv', resultList, delimiter = ',' )
resultList = np.array( resultList )
resultList.resize( [ len( learningRateList ), len( initList ) ] )
np.savetxt( newFolderName + '\_' + str( batch_size ) +'_gridSearch.csv', resultList, delimiter = ',' )
#%% start test
testSamples = testFeature.shape[ 0 ]
trainSamples = trainFeature.shape[ 0 ]
log = 'testSample_num = ' + str( testSamples ) + '\n trainSample_num = ' + str( trainSamples )
with open( newFolderName + '/log.txt' , "w") as text_file:
text_file.write( log )
| [
"ygong1@nd.edu"
] | ygong1@nd.edu |
3eb21352e9d9a3dcc23572a98430da9b90e4f9aa | db818127b373da9d88583e717f184f483a1f844d | /instruction_env/Lib/site-packages/numpydoc/tests/test_validate.py | b7127ce2012e8419e79f0df9f71e724b944e2723 | [
"MIT"
] | permissive | lfunderburk/Effective-Instructions | 4af5a763b5021668abd6d37f1d860eeff07bfee8 | ce40f890fb8623ff1ec9c3e9e1190505cbd1e6db | refs/heads/main | 2023-04-14T22:43:48.363281 | 2021-04-26T05:40:19 | 2021-04-26T05:40:19 | 331,163,652 | 0 | 0 | MIT | 2021-04-26T05:40:22 | 2021-01-20T01:58:52 | null | UTF-8 | Python | false | false | 32,871 | py | import pytest
import numpydoc.validate
import numpydoc.tests
# Shorthand alias: the tests in this module call the validator as ``validate_one``.
validate_one = numpydoc.validate.validate
class GoodDocStrings:
"""
Collection of good doc strings.
This class contains a lot of docstrings that should pass the validation
script without any errors.
See Also
--------
AnotherClass : With its description.
Examples
--------
>>> result = 1 + 1
"""
    # Fixture: a one-line docstring with opening and closing quotes on the
    # same line; per the class docstring this should pass validation.
    def one_liner(self):
        """Allow one liner docstrings (including quotes)."""
        # This should fail, but not because of the position of the quotes
        pass
    # Fixture: Parameters section with a default value ("default 'blue'"),
    # a documented ``**kwargs`` entry, and an inline literal block (``e.g.::``).
    def plot(self, kind, color="blue", **kwargs):
        """
        Generate a plot.
        Render the data in the Series as a matplotlib plot of the
        specified kind.
        Parameters
        ----------
        kind : str
            Kind of matplotlib plot, e.g.::
                'foo'
        color : str, default 'blue'
            Color name or rgb code.
        **kwargs
            These parameters will be passed to the matplotlib plotting
            function.
        See Also
        --------
        related : Something related.
        Examples
        --------
        >>> result = 1 + 1
        """
        pass
    # Fixture: two parameters documented on one line ("i, j : int") and
    # ``*args, **kwargs`` documented together under a single entry.
    def swap(self, arr, i, j, *args, **kwargs):
        """
        Swap two indicies on an array.
        The extended summary can be multiple paragraphs, but just one
        is enough to pass the validation.
        Parameters
        ----------
        arr : list
            The list having indexes swapped.
        i, j : int
            The indexes being swapped.
        *args, **kwargs
            Extraneous parameters are being permitted.
        See Also
        --------
        related : Something related.
        Examples
        --------
        >>> result = 1 + 1
        """
        pass
    # Fixture: Returns section with an unnamed (type-only) return value whose
    # description includes a bullet-list line.
    def sample(self):
        """
        Generate and return a random number.
        The value is sampled from a continuous uniform distribution between
        0 and 1.
        Returns
        -------
        float
            Random number generated.
            - Make sure you set a seed for reproducibility
        See Also
        --------
        related : Something related.
        Examples
        --------
        >>> result = 1 + 1
        """
        pass
    # Fixture: Returns section with multiple *named* return values, one of
    # which carries a ``.. versionadded::`` directive in its description.
    def random_letters(self):
        """
        Generate and return a sequence of random letters.
        The length of the returned string is also random, and is also
        returned.
        Returns
        -------
        length : int
            Length of the returned string.
        letters : str
            String of random letters.
            .. versionadded:: 0.1
        See Also
        --------
        related : Something related.
        Examples
        --------
        >>> result = 1 + 1
        """
        pass
    # Fixture: uses a Yields section (generator-style docs) instead of Returns.
    def sample_values(self):
        """
        Generate an infinite sequence of random numbers.
        The values are sampled from a continuous uniform distribution between
        0 and 1.
        Yields
        ------
        float
            Random number generated.
        See Also
        --------
        related : Something related.
        Examples
        --------
        >>> result = 1 + 1
        """
        pass
    # Fixture: no Parameters section (no arguments besides ``self``) and a
    # See Also entry whose description wraps onto a continuation line.
    def head(self):
        """
        Return the first 5 elements of the Series.
        This function is mainly useful to preview the values of the
        Series without displaying the whole of it.
        Returns
        -------
        int
            Subset of the original series with the 5 first values.
        See Also
        --------
        Series.tail : Return the last 5 elements of the Series.
        Series.iloc : Return a slice of the elements in the Series,
            which can also be used to return the first or last n.
        Examples
        --------
        >>> 1 + 1
        2
        """
        return 1
    # Fixture: documented parameter with a default, plus an Examples section
    # containing narrative text between separate doctest snippets.
    def head1(self, n=5):
        """
        Return the first elements of the Series.
        This function is mainly useful to preview the values of the
        Series without displaying the whole of it.
        Parameters
        ----------
        n : int
            Number of values to return.
        Returns
        -------
        int
            Subset of the original series with the n first values.
        See Also
        --------
        tail : Return the last n elements of the Series.
        Examples
        --------
        >>> s = 10
        >>> s
        10
        With the `n` parameter, we can change the number of returned rows:
        >>> s + 1
        11
        """
        return 1
    # Fixture: sentences beginning with a digit (instead of a capital letter)
    # in the summary, Parameters, Returns, See Also and Examples sections —
    # the docstring itself states these should be allowed.
    def summary_starts_with_number(self, n=5):
        """
        2nd rule of summaries should allow this.
        3 Starting the summary with a number instead of a capital letter.
        Also in parameters, returns, see also...
        Parameters
        ----------
        n : int
            4 Number of values to return.
        Returns
        -------
        int
            5 Subset of the original series with the n first values.
        See Also
        --------
        tail : 6 Return the last n elements of the Series.
        Examples
        --------
        >>> s = 10
        >>> s
        10
        7 With the `n` parameter, we can change the number of returned rows:
        >>> s + 1
        11
        """
        return 1
def contains(self, pat, case=True, na=float('NaN')):
"""
Return whether each value contains `pat`.
In this case, we are illustrating how to use sections, even
if the example is simple enough and does not require them.
Parameters
----------
pat : str
Pattern to check for within each element.
case : bool, default True
Whether check should be done with case sensitivity.
na : object, default np.nan
Fill value for missing data.
See Also
--------
related : Something related.
Examples
--------
>>> s = 25
>>> s
25
**Case sensitivity**
With `case_sensitive` set to `False` we can match `a` with both
`a` and `A`:
>>> s + 1
26
**Missing values**
We can fill missing values in the output using the `na` parameter:
>>> s * 2
50
"""
pass
def mode(self, axis, numeric_only):
"""
Ensure reST directives don't affect checks for leading periods.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
Parameters
----------
axis : str
Sentence ending in period, followed by single directive.
.. versionchanged:: 0.1.2
numeric_only : bool
Sentence ending in period, followed by multiple directives.
.. versionadded:: 0.1.2
.. deprecated:: 0.00.0
A multiline description,
which spans another line.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def good_imports(self):
"""
Ensure import other than numpy and pandas are fine.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
See Also
--------
related : Something related.
Examples
--------
This example does not import pandas or import numpy.
>>> import datetime
>>> datetime.MAXYEAR
9999
"""
pass
def no_returns(self):
"""
Say hello and have no returns.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
def empty_returns(self):
"""
Say hello and always return None.
Since this function never returns a value, this
docstring doesn't need a return section.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
def say_hello():
return "Hello World!"
say_hello()
if True:
return
else:
return None
def multiple_variables_on_one_line(self, matrix, a, b, i, j):
"""
Swap two values in a matrix.
The extended summary can be multiple paragraphs, but just one
is enough to pass the validation.
Parameters
----------
matrix : list of list
A double list that represents a matrix.
a, b : int
The indicies of the first value.
i, j : int
The indicies of the second value.
See Also
--------
related : Something related.
Examples
--------
>>> result = 1 + 1
"""
pass
class BadGenericDocStrings:
"""Everything here has a bad docstring
"""
def func(self):
"""Some function.
With several mistakes in the docstring.
It has a blank like after the signature `def func():`.
The text 'Some function' should go in the line after the
opening quotes of the docstring, not in the same line.
There is a blank line between the docstring and the first line
of code `foo = 1`.
The closing quotes should be in the next line, not in this one."""
foo = 1
bar = 2
return foo + bar
def astype(self, dtype):
"""
Casts Series type.
Verb in third-person of the present simple, should be infinitive.
"""
pass
def astype1(self, dtype):
"""
Method to cast Series type.
Does not start with verb.
"""
pass
def astype2(self, dtype):
"""
Cast Series type
Missing dot at the end.
"""
pass
def astype3(self, dtype):
"""
Cast Series type from its current type to the new type defined in
the parameter dtype.
Summary is too verbose and doesn't fit in a single line.
"""
pass
def two_linebreaks_between_sections(self, foo):
"""
Test linebreaks message GL03.
Note 2 blank lines before parameters section.
Parameters
----------
foo : str
Description of foo parameter.
"""
pass
def linebreak_at_end_of_docstring(self, foo):
"""
Test linebreaks message GL03.
Note extra blank line at end of docstring.
Parameters
----------
foo : str
Description of foo parameter.
"""
pass
def plot(self, kind, **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Note the blank line between the parameters title and the first
parameter. Also, note that after the name of the parameter `kind`
and before the colon, a space is missing.
Also, note that the parameter descriptions do not start with a
capital letter, and do not finish with a dot.
Finally, the `**kwargs` parameter is missing.
Parameters
----------
kind: str
kind of matplotlib plot
"""
pass
def unknown_section(self):
"""
This section has an unknown section title.
Unknown Section
---------------
This should raise an error in the validation.
"""
def sections_in_wrong_order(self):
"""
This docstring has the sections in the wrong order.
Parameters
----------
name : str
This section is in the right position.
Examples
--------
>>> print('So far Examples is good, as it goes before Parameters')
So far Examples is good, as it goes before Parameters
See Also
--------
function : This should generate an error, as See Also needs to go
before Examples.
"""
def deprecation_in_wrong_order(self):
"""
This docstring has the deprecation warning in the wrong order.
This is the extended summary. The correct order should be
summary, deprecation warning, extended summary.
.. deprecated:: 1.0
This should generate an error as it needs to go before
extended summary.
"""
def method_wo_docstrings(self):
pass
def directives_without_two_colons(self, first, second):
"""
Ensure reST directives have trailing colons.
Parameters
----------
first : str
Sentence ending in period, followed by single directive w/o colons.
.. versionchanged 0.1.2
second : bool
Sentence ending in period, followed by multiple directives w/o
colons.
.. versionadded 0.1.2
.. deprecated 0.00.0
"""
pass
class BadSummaries:
def no_summary(self):
"""
Returns
-------
int
Always one.
"""
def heading_whitespaces(self):
"""
Summary with heading whitespaces.
Returns
-------
int
Always one.
"""
def wrong_line(self):
"""Quotes are on the wrong line.
Both opening and closing."""
pass
def no_punctuation(self):
"""
Has the right line but forgets punctuation
"""
pass
def no_capitalization(self):
"""
provides a lowercase summary.
"""
pass
def no_infinitive(self):
"""
Started with a verb that is not infinitive.
"""
def multi_line(self):
"""
Extends beyond one line
which is not correct.
"""
def two_paragraph_multi_line(self):
"""
Extends beyond one line
which is not correct.
Extends beyond one line, which in itself is correct but the
previous short summary should still be an issue.
"""
class BadParameters:
"""
Everything here has a problem with its Parameters section.
"""
def no_type(self, value):
"""
Lacks the type.
Parameters
----------
value
A parameter without type.
"""
def type_with_period(self, value):
"""
Has period after type.
Parameters
----------
value : str.
A parameter type should not finish with period.
"""
def no_description(self, value):
"""
Lacks the description.
Parameters
----------
value : str
"""
def missing_params(self, kind, **kwargs):
"""
Lacks kwargs in Parameters.
Parameters
----------
kind : str
Foo bar baz.
"""
def bad_colon_spacing(self, kind):
"""
Has bad spacing in the type line.
Parameters
----------
kind: str
Needs a space after kind.
"""
def no_description_period(self, kind):
"""
Forgets to add a period to the description.
Parameters
----------
kind : str
Doesn't end with a dot
"""
def no_description_period_with_directive(self, kind):
"""
Forgets to add a period, and also includes a directive.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionadded:: 0.00.0
"""
def no_description_period_with_directives(self, kind):
"""
Forgets to add a period, and also includes multiple directives.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionchanged:: 0.00.0
.. deprecated:: 0.00.0
"""
def parameter_capitalization(self, kind):
"""
Forgets to capitalize the description.
Parameters
----------
kind : str
this is not capitalized.
"""
def blank_lines(self, kind):
"""
Adds a blank line after the section header.
Parameters
----------
kind : str
Foo bar baz.
"""
pass
def integer_parameter(self, kind):
"""
Uses integer instead of int.
Parameters
----------
kind : integer
Foo bar baz.
"""
pass
def string_parameter(self, kind):
"""
Uses string instead of str.
Parameters
----------
kind : string
Foo bar baz.
"""
pass
def boolean_parameter(self, kind):
"""
Uses boolean instead of bool.
Parameters
----------
kind : boolean
Foo bar baz.
"""
pass
def list_incorrect_parameter_type(self, kind):
"""
Uses list of boolean instead of list of bool.
Parameters
----------
kind : list of boolean, integer, float or string
Foo bar baz.
"""
pass
def bad_parameter_spacing(self, a, b):
"""
The parameters on the same line have an extra space between them.
Parameters
----------
a, b : int
Foo bar baz.
"""
pass
class BadReturns:
def return_not_documented(self):
"""
Lacks section for Returns
"""
return "Hello world!"
def yield_not_documented(self):
"""
Lacks section for Yields
"""
yield "Hello world!"
def no_type(self):
"""
Returns documented but without type.
Returns
-------
Some value.
"""
return "Hello world!"
def no_description(self):
"""
Provides type but no descrption.
Returns
-------
str
"""
return "Hello world!"
def no_punctuation(self):
"""
Provides type and description but no period.
Returns
-------
str
A nice greeting
"""
return "Hello world!"
def named_single_return(self):
"""
Provides name but returns only one value.
Returns
-------
s : str
A nice greeting.
"""
return "Hello world!"
def no_capitalization(self):
"""
Forgets capitalization in return values description.
Returns
-------
foo : str
The first returned string.
bar : str
the second returned string.
"""
return "Hello", "World!"
def no_period_multi(self):
"""
Forgets period in return values description.
Returns
-------
foo : str
The first returned string
bar : str
The second returned string.
"""
return "Hello", "World!"
class BadSeeAlso:
def no_desc(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail
"""
pass
def desc_no_period(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n
"""
pass
def desc_first_letter_lowercase(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
pass
def prefix_pandas(self):
"""
Have `pandas` prefix in See Also section.
See Also
--------
pandas.Series.rename : Alter Series index labels or name.
DataFrame.head : The first `n` rows of the caller object.
"""
pass
class BadExamples:
def missing_whitespace_around_arithmetic_operator(self):
"""
Examples
--------
>>> 2+5
7
"""
pass
def indentation_is_not_a_multiple_of_four(self):
"""
Examples
--------
>>> if 2 + 5:
... pass
"""
pass
def missing_whitespace_after_comma(self):
"""
Examples
--------
>>> import datetime
>>> value = datetime.date(2019,1,1)
"""
pass
class TestValidator:
def _import_path(self, klass=None, func=None):
"""
Build the required import path for tests in this module.
Parameters
----------
klass : str
Class name of object in module.
func : str
Function name of object in module.
Returns
-------
str
Import path of specified object in this module
"""
base_path = "numpydoc.tests.test_validate"
if klass:
base_path = ".".join([base_path, klass])
if func:
base_path = ".".join([base_path, func])
return base_path
def test_one_liner(self, capsys):
result = validate_one(self._import_path(klass="GoodDocStrings", func='one_liner'))
errors = " ".join(err[1] for err in result["errors"])
assert 'should start in the line immediately after the opening quotes' not in errors
assert 'should be placed in the line after the last text' not in errors
def test_good_class(self, capsys):
errors = validate_one(self._import_path(klass="GoodDocStrings"))["errors"]
assert isinstance(errors, list)
assert not errors
@pytest.mark.parametrize(
"func",
[
"plot",
"swap",
"sample",
"random_letters",
"sample_values",
"head",
"head1",
"summary_starts_with_number",
"contains",
"mode",
"good_imports",
"no_returns",
"empty_returns",
"multiple_variables_on_one_line",
],
)
def test_good_functions(self, capsys, func):
errors = validate_one(self._import_path(klass="GoodDocStrings", func=func))[
"errors"
]
assert isinstance(errors, list)
assert not errors
def test_bad_class(self, capsys):
errors = validate_one(self._import_path(klass="BadGenericDocStrings"))["errors"]
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize(
"func",
[
"func",
"astype",
"astype1",
"astype2",
"astype3",
"plot",
"directives_without_two_colons",
],
)
def test_bad_generic_functions(self, capsys, func):
errors = validate_one(
self._import_path(klass="BadGenericDocStrings", func=func) # noqa:F821
)["errors"]
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize(
"klass,func,msgs",
[
# See Also tests
(
"BadGenericDocStrings",
"unknown_section",
('Found unknown section "Unknown Section".',),
),
(
"BadGenericDocStrings",
"sections_in_wrong_order",
(
"Sections are in the wrong order. Correct order is: Parameters, "
"See Also, Examples",
),
),
(
"BadGenericDocStrings",
"deprecation_in_wrong_order",
("Deprecation warning should precede extended summary",),
),
(
"BadGenericDocStrings",
"directives_without_two_colons",
(
"reST directives ['versionchanged', 'versionadded', "
"'deprecated'] must be followed by two colons",
),
),
(
"BadSeeAlso",
"no_desc",
('Missing description for See Also "Series.tail" reference',),
),
(
"BadSeeAlso",
"desc_no_period",
('Missing period at end of description for See Also "Series.iloc"',),
),
(
"BadSeeAlso",
"desc_first_letter_lowercase",
('should be capitalized for See Also "Series.tail"',),
),
# Summary tests
(
"BadSummaries",
"no_summary",
("No summary found",),
),
(
"BadSummaries",
"heading_whitespaces",
("Summary contains heading whitespaces",),
),
(
"BadSummaries",
"wrong_line",
("should start in the line immediately after the opening quotes",
"should be placed in the line after the last text"),
),
("BadSummaries", "no_punctuation", ("Summary does not end with a period",)),
(
"BadSummaries",
"no_capitalization",
("Summary does not start with a capital letter",),
),
(
"BadSummaries",
"no_capitalization",
("Summary must start with infinitive verb",),
),
("BadSummaries", "multi_line", ("Summary should fit in a single line",)),
(
"BadSummaries",
"two_paragraph_multi_line",
("Summary should fit in a single line",),
),
# Parameters tests
(
"BadParameters",
"no_type",
('Parameter "value" has no type',),
),
(
"BadParameters",
"type_with_period",
('Parameter "value" type should not finish with "."',),
),
(
"BadParameters",
"no_description",
('Parameter "value" has no description',),
),
(
"BadParameters",
"missing_params",
("Parameters {'**kwargs'} not documented",),
),
(
"BadParameters",
"bad_colon_spacing",
(
'Parameter "kind" requires a space before the colon '
"separating the parameter name and type",
),
),
(
"BadParameters",
"no_description_period",
('Parameter "kind" description should finish with "."',),
),
(
"BadParameters",
"no_description_period_with_directive",
('Parameter "kind" description should finish with "."',),
),
(
"BadParameters",
"parameter_capitalization",
('Parameter "kind" description should start with a capital letter',),
),
(
"BadParameters",
"integer_parameter",
('Parameter "kind" type should use "int" instead of "integer"',),
),
(
"BadParameters",
"string_parameter",
('Parameter "kind" type should use "str" instead of "string"',),
),
(
"BadParameters",
"boolean_parameter",
('Parameter "kind" type should use "bool" instead of "boolean"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "bool" instead of "boolean"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "int" instead of "integer"',),
),
(
"BadParameters",
"list_incorrect_parameter_type",
('Parameter "kind" type should use "str" instead of "string"',),
),
(
"BadParameters",
"bad_parameter_spacing",
("Parameters {'b'} not documented", "Unknown parameters {' b'}"),
),
pytest.param(
"BadParameters",
"blank_lines",
("No error yet?",),
marks=pytest.mark.xfail,
),
# Returns tests
("BadReturns", "return_not_documented", ("No Returns section found",)),
("BadReturns", "yield_not_documented", ("No Yields section found",)),
pytest.param("BadReturns", "no_type", ("foo",), marks=pytest.mark.xfail),
("BadReturns", "no_description", ("Return value has no description",)),
(
"BadReturns",
"no_punctuation",
('Return value description should finish with "."',),
),
(
"BadReturns",
"named_single_return",
(
"The first line of the Returns section should contain only the "
"type, unless multiple values are being returned",
),
),
(
"BadReturns",
"no_capitalization",
("Return value description should start with a capital letter",),
),
(
"BadReturns",
"no_period_multi",
('Return value description should finish with "."',),
),
(
"BadGenericDocStrings",
"method_wo_docstrings",
("The object does not have a docstring",),
),
(
"BadGenericDocStrings",
"two_linebreaks_between_sections",
(
"Double line break found; please use only one blank line to "
"separate sections or paragraphs, and do not leave blank lines "
"at the end of docstrings",
),
),
(
"BadGenericDocStrings",
"linebreak_at_end_of_docstring",
(
"Double line break found; please use only one blank line to "
"separate sections or paragraphs, and do not leave blank lines "
"at the end of docstrings",
),
),
],
)
def test_bad_docstrings(self, capsys, klass, func, msgs):
with pytest.warns(None) as w:
result = validate_one(self._import_path(klass=klass, func=func))
if len(w):
assert all('Unknown section' in str(ww.message) for ww in w)
for msg in msgs:
assert msg in " ".join(err[1] for err in result["errors"])
class TestDocstringClass:
@pytest.mark.parametrize("invalid_name", ["unknown_mod", "unknown_mod.MyClass"])
def test_raises_for_invalid_module_name(self, invalid_name):
msg = 'No module can be imported from "{}"'.format(invalid_name)
with pytest.raises(ImportError, match=msg):
numpydoc.validate.Docstring(invalid_name)
@pytest.mark.parametrize(
"invalid_name", ["datetime.BadClassName", "datetime.bad_method_name"]
)
def test_raises_for_invalid_attribute_name(self, invalid_name):
name_components = invalid_name.split(".")
obj_name, invalid_attr_name = name_components[-2], name_components[-1]
msg = "'{}' has no attribute '{}'".format(obj_name, invalid_attr_name)
with pytest.raises(AttributeError, match=msg):
numpydoc.validate.Docstring(invalid_name)
| [
"hanh.tong@tap.work"
] | hanh.tong@tap.work |
cd77c134d12365a252030bc1ea72ea577fe53e8a | 91ee7224de43029922f1396592bdf7802fe1e15e | /TrainingPadaX/python-flask-login/app.py | 4ced875ee0672caa28a2879a13fa6d2f89dcd994 | [] | no_license | Soniafish/Soniafish.github.io | 83aefae6b0180fa0e2f39c8dbdcbb799889d7cec | 1174dbd87a7aeae3489cfbd899752c3517ffe20a | refs/heads/master | 2021-09-13T01:13:56.406930 | 2021-08-17T14:04:16 | 2021-08-17T14:04:16 | 72,912,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | # !/usr/bin/python2
# coding:utf-8
from flask import Flask
from flask import request
from flask import redirect
from flask import render_template
from flask import session
app=Flask(__name__)
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
@app.route("/")
def index():
if 'username' in session:
return redirect("/member")
else:
return render_template("default.html")
@app.route("/signin", methods=["POST", "GET"])
def handel_signin():
if request.method == 'POST':
account=request.form["account"]
password=request.form["pword"]
if account=="test" and password=="test":
session['username'] = account
return redirect("/member")
else:
return redirect("/error")
else:
if 'username' in session:
return redirect("/member")
else:
return redirect("/default")
@app.route("/member")
def check_member():
# if session["username"]=="test":
if 'username' in session:
return render_template("member.html")
else:
return redirect("/")
@app.route("/error")
def show_error():
return render_template("error.html")
@app.route('/signout')
def signout():
# session.pop('username', "")
session.pop('username', None)
return render_template("default.html")
print("Logged in as %s")
app.run(port=3000) | [
"sonia.lin@kantech.com.tw"
] | sonia.lin@kantech.com.tw |
555bdcfd6c04b099c7185c359968f2d00f2e12c9 | c5d1001b485a23a38d6acf4184ab1254d7c3484f | /grayscale/9thresholds/mainMultipleModelsLinear.py | 85eb5424f0374e5cb90d98ef29486bc263e0ce78 | [] | no_license | timurt/BinarizedNet | 74e1cdc52147c404e9458617e90c992296866e47 | 4bb8210a0c42088553231c33b430eb2a0fa60b37 | refs/heads/master | 2020-03-11T01:28:00.984756 | 2018-04-26T06:52:31 | 2018-04-26T06:52:31 | 129,692,207 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,129 | py | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from torchvision import datasets, transforms
from torch.autograd import Variable
from binaryNet import Binary_W, Binary, Threshold
import shutil
import matplotlib
from matplotlib.colors import hsv_to_rgb
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=40, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
data_folder = '../../data'
best_prec1 = 0.0
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(data_folder, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(data_folder, train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
def to_np(x):
return x.data.cpu().numpy()
def to_var(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
def threshold(data, lowerBound, upperBound):
INF = -100
output = data.clone()
output[output < lowerBound] = INF
output[output >= upperBound] = INF
output[output != INF] = 1
output[output == INF] = -1
return output
def grayscale(data, dtype='float32'):
# luma coding weighted average in video systems
r, g, b = np.asarray(.3, dtype=dtype), np.asarray(.59, dtype=dtype), np.asarray(.11, dtype=dtype)
rst = r * data[:, 0, :, :] + g * data[:, 1, :, :] + b * data[:, 2, :, :]
# add channel dimension
rst = rst[:, np.newaxis, :, :]
return torch.FloatTensor(rst)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename + '_latest.pth.tar')
if is_best:
shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.fc1 = nn.Linear(20 * 25, 50)
self.fc2 = nn.Linear(50, 10)
self.bn0 = nn.BatchNorm2d(1)
self.bn1 = nn.BatchNorm2d(10)
self.bn2 = nn.BatchNorm2d(20)
self.bn3 = nn.BatchNorm1d(50)
def forward(self, x):
x, w = self.binary_w(x, self.conv1)
x = F.conv2d(x, w)
x = F.tanh(F.max_pool2d(self.bn1(x), 2))
x, w = self.binary_w(x, self.conv2)
x = F.conv2d(x, w)
x = F.tanh(F.max_pool2d(self.bn2(x), 2))
x = self.binary(x)
x = x.view(-1, 20 * 25)
#x = F.tanh(self.bn3(self.fc1(x)))
# x = self.binary(x)
#x = self.fc2(x)
return x
def binary(self, input):
return Binary()(input)
def binary_w(self, input, param):
return Binary_W()(input, param.weight)
class FcNet(nn.Module):
def __init__(self):
super(FcNet, self).__init__()
self.fc1 = nn.Linear(7500, 1500)
self.fc2 = nn.Linear(1500, 512)
self.fc3 = nn.Linear(512, 90)
self.fc4 = nn.Linear(90, 10)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
x = self.fc4(x)
return x
thresholds = 3
minVal = -1.0
maxVal = 1.0
models = []
r = np.linspace(minVal, maxVal, num=thresholds+1, endpoint=True)
for j in range(1, len(r)):
lr = 0.01
model = Net()
if args.cuda:
model.cuda()
file_name = 'BINARY_CAMILA_' + str(j) + '_' + str(len(r) - 1) + '_best.pth.tar'
checkpoint = torch.load(file_name)
model.load_state_dict(checkpoint['state_dict'])
models.append(model)
model = FcNet()
if args.cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
criterion = nn.CrossEntropyLoss()
i = 0
def train(epoch):
model.train()
for i in range(0, len(models)):
models[i].eval()
step = (epoch-1)*len(train_loader.dataset)/100
for batch_idx, (data, target) in enumerate(train_loader):
data = grayscale(data)
outputs = [];
for i in range(0, len(models)):
from_limit = r[i]
to_limit = r[i + 1]
inp = threshold(data, from_limit, to_limit);
if args.cuda:
inp = inp.cuda()
inp = Variable(inp)
out = models[i](inp)
outputs.append(out.cpu().data.numpy())
input = np.column_stack( outputs )
input = torch.FloatTensor(input)
#input = np.stack((output1.data.numpy(), output2.data.numpy(), output3.data.numpy()), axis=1)
if args.cuda:
input, target = input.cuda(), target.cuda()
input, target = Variable(input), Variable(target)
optimizer.zero_grad()
output = model(input)
#loss = F.nll_loss(output, target)
loss = criterion(output, target)
if loss.data[0]<10.0:
#print ('True')
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.00f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
# Compute accuracy
_, argmax = torch.max(output, 1)
def adjust_learning_rate(lr, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 after 150 and 225 epochs"""
lr = lr * (0.1 ** (epoch // 13))
print ('Learning rate: ' + str(lr))
# log to TensorBoard
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def test(epoch):
global best_prec1
model.eval()
for i in range(0, len(models)):
models[i].eval()
test_loss = 0
correct = 0
for data, target in test_loader:
data = grayscale(data)
outputs = []
for i in range(0, len(models)):
from_limit = r[i]
to_limit = r[i + 1]
inp = threshold(data, from_limit, to_limit);
if args.cuda:
inp = inp.cuda()
inp = Variable(inp)
out = models[i](inp)
outputs.append(out.cpu().data.numpy())
input = np.column_stack( outputs)
input = torch.FloatTensor(input)
if args.cuda:
input, target = input.cuda(), target.cuda()
input, target = Variable(input, volatile=True), Variable(target)
output = model(input)
test_loss += criterion(output, target).data[0]
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).cpu().sum()
test_loss = test_loss
test_loss /= len(test_loader) # loss function already averages over batch size
accuracy = 100. * correct / len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%), Best ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
accuracy, best_prec1))
is_best = accuracy > best_prec1
best_prec1 = max(accuracy, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, 'BINARY_CAMILA_LINEAR')
for epoch in range(1, args.epochs + 1):
adjust_learning_rate(args.lr, optimizer, epoch)
train(epoch)
test(epoch)
| [
"timurtibeyev@gmail.com"
] | timurtibeyev@gmail.com |
b4aec3c94de4ef1c9d0804e58f30aa47e9eeb51c | 22c6303398fe9d3a01ea2e2dee56a7c51ffb8106 | /src/StyleVarPane.py | caec6f9ba7540e07c20e8041d2bf85d34f9bbcfc | [] | no_license | prrg/BEE2.4 | 07c3d96b58bda8d7b4383d46778d01bcf970a5e4 | ffd30eb140e04db781229b27992aaed4385b438b | refs/heads/master | 2020-04-01T18:45:12.625402 | 2018-10-04T05:28:13 | 2018-10-04T05:28:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,859 | py | from tkinter import *
from tk_tools import TK_ROOT
from tkinter import ttk
from collections import namedtuple
import functools
import operator
import img as png
from BEE2_config import GEN_OPTS
from SubPane import SubPane
import packageLoader
import tooltip
import utils
import itemconfig
from typing import Union
stylevar = namedtuple('stylevar', 'id name default desc')
# Special StyleVars that are hardcoded into the BEE2
# These are effectively attributes of Portal 2 itself, and always work
# in every style.
styleOptions = [
# ID, Name, default value
stylevar(
id='MultiverseCave',
name=_('Multiverse Cave'),
default=1,
desc=_('Play the Workshop Cave Johnson lines on map start.')
),
stylevar(
id='FixFizzlerBump',
name=_('Prevent Portal Bump (fizzler)'),
default=0,
desc=_('Add portal bumpers to make it more difficult to portal across '
'fizzler edges. This can prevent placing portals in tight '
'spaces near fizzlers, or fizzle portals on activation.')
),
stylevar(
id='NoMidVoices',
name=_('Suppress Mid-Chamber Dialogue'),
default=0,
desc=_('Disable all voicelines other than entry and exit lines.')
),
stylevar(
id='UnlockDefault',
name=_('Unlock Default Items'),
default=0,
desc=_('Allow placing and deleting the mandatory Entry/Exit Doors and '
'Large Observation Room. Use with caution, this can have weird '
'results!')
),
stylevar(
id='AllowGooMist',
name=_('Allow Adding Goo Mist'),
default=1,
desc=_('Add mist particles above Toxic Goo in certain styles. This can '
'increase the entity count significantly with large, complex '
'goo pits, so disable if needed.')
),
stylevar(
id='FunnelAllowSwitchedLights',
name=_('Light Reversible Excursion Funnels'),
default=1,
desc=_('Funnels emit a small amount of light. However, if multiple funnels '
'are near each other and can reverse polarity, this can cause '
'lighting issues. Disable this to prevent that by disabling '
'lights. Non-reversible Funnels do not have this issue.'),
),
stylevar(
id='EnableShapeSignageFrame',
name=_('Enable Shape Framing'),
default=1,
desc=_('After 10 shape-type antlines are used, the signs repeat. '
'With this enabled, colored frames will be added to '
'distinguish them.'),
),
]
checkbox_all = {}
checkbox_chosen = {}
checkbox_other = {}
tk_vars = {}
VAR_LIST = []
STYLES = {}
window = None
UI = {}
def update_filter():
pass
def add_vars(style_vars, styles):
"""
Add the given stylevars to our list.
"""
VAR_LIST.clear()
VAR_LIST.extend(
sorted(style_vars, key=operator.attrgetter('id'))
)
for var in VAR_LIST: # type: packageLoader.StyleVar
var.enabled = GEN_OPTS.get_bool('StyleVar', var.id, var.default)
for style in styles:
STYLES[style.id] = style
def set_stylevar(var):
    """Write the checkbox state for one stylevar back into the user config."""
    GEN_OPTS['StyleVar'][var] = str(tk_vars[var].get())
    # Unlocking the mandatory items changes which items may be placed.
    if var == 'UnlockDefault':
        update_filter()
def make_desc(var: Union[packageLoader.StyleVar, stylevar], is_hardcoded=False):
    """Generate the description text for a StyleVar.
    This adds 'Default: on/off', and which styles it's used in.

    :param var: the stylevar being described.
    :param is_hardcoded: if set, the var is one of the built-in styleOptions
        and is always reported as 'Styles: Unstyled'.
    :return: the tooltip text, one entry per line.
    """
    # Start with the var's own description, separated by a blank line.
    if var.desc:
        desc = [var.desc, '']
    else:
        desc = []
    desc.append(
        _('Default: On')
        if var.default else
        _('Default: Off')
    )
    if is_hardcoded or var.styles is None:
        desc.append(_('Styles: Unstyled'))
    else:
        # Every registered style this var actually applies to.
        app_styles = [
            style
            for style in
            STYLES.values()
            if var.applies_to_style(style)
        ]
        if len(app_styles) == len(STYLES):
            desc.append(_('Styles: All'))
        else:
            # List the applicable styles by their short display name.
            style_list = sorted(
                style.selitem_data.short_name
                for style in
                app_styles
            )
            desc.append(
                ngettext('Style: {}', 'Styles: {}', len(style_list)
                ).format(', '.join(style_list)))
    return '\n'.join(desc)
def refresh(selected_style):
    """Move the stylevars to the correct position.
    This depends on which apply to the current style.
    """
    en_row = 0
    dis_row = 0
    for var in VAR_LIST:
        if var.applies_to_all():
            continue # Always visible!
        if var.applies_to_style(selected_style):
            # Show in the 'Selected Style' frame, hide the duplicate.
            checkbox_chosen[var.id].grid(
                row=en_row,
                sticky="W",
                padx=3,
            )
            checkbox_other[var.id].grid_remove()
            en_row += 1
        else:
            # Show in the 'Other Styles' frame instead.
            checkbox_chosen[var.id].grid_remove()
            checkbox_other[var.id].grid(
                row=dis_row,
                sticky="W",
                padx=3,
            )
            dis_row += 1
    # Display a placeholder label whenever a frame ends up empty.
    if en_row == 0:
        UI['stylevar_chosen_none'].grid(sticky='EW')
    else:
        UI['stylevar_chosen_none'].grid_remove()
    if dis_row == 0:
        UI['stylevar_other_none'].grid(sticky='EW')
    else:
        UI['stylevar_other_none'].grid_remove()
def flow_stylevar(e=None):
    """<Configure> handler: resize the canvas scrollregion to fit its contents."""
    UI['style_can']['scrollregion'] = UI['style_can'].bbox(ALL)
def make_pane(tool_frame):
    """Create the styleVar pane.

    Builds a SubPane holding a two-tab notebook: a scrollable 'Styles' tab
    with one checkbox per stylevar (grouped into the All / Selected Style /
    Other Styles frames), and an 'Items' tab populated by itemconfig.
    """
    global window
    window = SubPane(
        TK_ROOT,
        options=GEN_OPTS,
        title=_('Style/Item Properties'),
        name='style',
        resize_y=True,
        tool_frame=tool_frame,
        tool_img=png.png('icons/win_stylevar'),
        tool_col=3,
    )
    UI['nbook'] = nbook = ttk.Notebook(window)
    nbook.grid(row=0, column=0, sticky=NSEW)
    window.rowconfigure(0, weight=1)
    window.columnconfigure(0, weight=1)
    nbook.enable_traversal()
    stylevar_frame = ttk.Frame(nbook)
    stylevar_frame.rowconfigure(0, weight=1)
    stylevar_frame.columnconfigure(0, weight=1)
    nbook.add(stylevar_frame, text=_('Styles'))
    UI['style_can'] = Canvas(stylevar_frame, highlightthickness=0)
    # need to use a canvas to allow scrolling
    UI['style_can'].grid(sticky='NSEW')
    window.rowconfigure(0, weight=1)
    UI['style_scroll'] = ttk.Scrollbar(
        stylevar_frame,
        orient=VERTICAL,
        command=UI['style_can'].yview,
    )
    UI['style_scroll'].grid(column=1, row=0, rowspan=2, sticky="NS")
    UI['style_can']['yscrollcommand'] = UI['style_scroll'].set
    utils.add_mousewheel(UI['style_can'], stylevar_frame)
    # Three labelled groups live inside the scrollable canvas.
    canvas_frame = ttk.Frame(UI['style_can'])
    frame_all = ttk.Labelframe(canvas_frame, text=_("All:"))
    frame_all.grid(row=0, sticky='EW')
    frm_chosen = ttk.Labelframe(canvas_frame, text=_("Selected Style:"))
    frm_chosen.grid(row=1, sticky='EW')
    ttk.Separator(
        canvas_frame,
        orient=HORIZONTAL,
    ).grid(row=2, sticky='EW', pady=(10, 5))
    frm_other = ttk.Labelframe(canvas_frame, text=_("Other Styles:"))
    frm_other.grid(row=3, sticky='EW')
    # Placeholder labels, gridded by refresh() when a group is empty.
    UI['stylevar_chosen_none'] = ttk.Label(
        frm_chosen,
        text=_('No Options!'),
        font='TkMenuFont',
        justify='center',
    )
    UI['stylevar_other_none'] = ttk.Label(
        frm_other,
        text=_('None!'),
        font='TkMenuFont',
        justify='center',
    )
    all_pos = 0
    for all_pos, var in enumerate(styleOptions):
        # Add the special stylevars which apply to all styles
        tk_vars[var.id] = IntVar(
            value=GEN_OPTS.get_bool('StyleVar', var.id, var.default)
        )
        checkbox_all[var.id] = ttk.Checkbutton(
            frame_all,
            variable=tk_vars[var.id],
            text=var.name,
            command=functools.partial(set_stylevar, var.id)
        )
        checkbox_all[var.id].grid(row=all_pos, column=0, sticky="W", padx=3)
        tooltip.add_tooltip(
            checkbox_all[var.id],
            make_desc(var, is_hardcoded=True),
        )
    # Package-defined stylevars follow the hardcoded ones.
    for var in VAR_LIST:
        tk_vars[var.id] = IntVar(value=var.enabled)
        args = {
            'variable': tk_vars[var.id],
            'text': var.name,
            'command': functools.partial(set_stylevar, var.id)
        }
        desc = make_desc(var)
        if var.applies_to_all():
            # Available in all styles - put with the hardcoded variables.
            all_pos += 1
            checkbox_all[var.id] = check = ttk.Checkbutton(frame_all, **args)
            check.grid(row=all_pos, column=0, sticky="W", padx=3)
            tooltip.add_tooltip(check, desc)
        else:
            # Swap between checkboxes depending on style.
            checkbox_chosen[var.id] = ttk.Checkbutton(frm_chosen, **args)
            checkbox_other[var.id] = ttk.Checkbutton(frm_other, **args)
            tooltip.add_tooltip(
                checkbox_chosen[var.id],
                desc,
            )
            tooltip.add_tooltip(
                checkbox_other[var.id],
                desc,
            )
    UI['style_can'].create_window(0, 0, window=canvas_frame, anchor="nw")
    UI['style_can'].update_idletasks()
    UI['style_can'].config(
        scrollregion=UI['style_can'].bbox(ALL),
        width=canvas_frame.winfo_reqwidth(),
    )
    if utils.USE_SIZEGRIP:
        ttk.Sizegrip(
            window,
            cursor=utils.CURSORS['stretch_vert'],
        ).grid(row=1, column=0)
    # Keep the scrollregion in sync when the canvas is resized.
    UI['style_can'].bind('<Configure>', flow_stylevar)
    item_config_frame = ttk.Frame(nbook)
    nbook.add(item_config_frame, text=_('Items'))
    itemconfig.make_pane(item_config_frame)
| [
"spencerb21@live.com"
] | spencerb21@live.com |
7e064e4f1d6fd411ba34c8d74ef4610795f58dba | 2d4cb0621e5bb9680144562694aedea1fd4d0d86 | /artificial/train.py | c65139818f50dde87ecd8ed76e56b7986d6c5862 | [] | no_license | ForeverZyh/robust_rnn | a29f539b2d77a85e7ad509791e89429da7e23958 | a6afcd2e3814b8eff66e43be3119d994c989b7d0 | refs/heads/master | 2020-09-15T14:19:22.692429 | 2019-12-28T03:56:58 | 2019-12-28T03:56:58 | 223,473,847 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | import tensorflow as tf
import numpy as np
from simple_rnn_model import params, SimpleRNNModel1
from utils import get_one_hot
# Load the pre-generated dataset arrays from disk.
training_X = np.load("./X_train.npy")
training_y = np.load("./y_train.npy")
training_num = len(training_X)
test_X = np.load("./X_test.npy")
test_y = np.load("./y_test.npy")
# Binary classification: labels are one-hot encoded below.
nb_classes = 2
training_Y = get_one_hot(training_y, nb_classes)
test_Y = get_one_hot(test_y, nb_classes)
# Model hyper-parameters (the remaining defaults come from simple_rnn_model.params).
params["max_len"] = 100
params["D"] = 2
params["conv_layer2_nfilters"] = 3
model = SimpleRNNModel1(params, 30, nb_classes)
# Train for up to 10 epochs; the first 500 test samples serve as validation
# data for the model's early-stopping callback.
model.model.fit(x=training_X, y=training_Y, batch_size=64, epochs=10, callbacks=[model.early_stopping], verbose=2,
                validation_data=(test_X[:500], test_Y[:500]), shuffle=True)
model.model.save_weights(filepath="./models/rnn_tiny")
| [
"yuhao.zhang@wisc.edu"
] | yuhao.zhang@wisc.edu |
6a9573d123ee3c2eab2c86427659496281d46515 | d90b10f4fcd0199bf064edf2b46a77dbc64b2a50 | /PythonGitPrograms/5-Inheritance, Multiple Inheritance.py | 3a263f20e107af833200f4061b3f94f5fa0724a1 | [] | no_license | satyam93sinha/PythonBasics | f13d56f84d77523b9d77e989ed09dd2d61446220 | e07499884f617f2717d6aea75818db07f8f1584f | refs/heads/master | 2020-04-22T19:53:38.449222 | 2019-03-18T13:02:04 | 2019-03-18T13:02:04 | 170,622,798 | 0 | 2 | null | 2019-10-01T16:51:48 | 2019-02-14T03:44:47 | Python | UTF-8 | Python | false | false | 426 | py | class Base1:
    # Class attributes; the print runs once, when the class body executes.
    a, b = 5, 9
    print('Base1, a:{}, b:{}'.format(a, b))
class Base2:
    #print("Base2.mro:{}".format(Base2.mro()))
    a, c = 2, 3
    print("Base2, a:{}, c:{}".format(a, c))
class Child1(Base2):
    d, e = 1, 'go'
    print("Child1, d:{}, e:{}".format(d, e))
# Multiple inheritance: attribute lookup follows the MRO printed below.
class Child2(Child1, Base2, Base1):
    print("Child2")
# Method Resolution Order (C3 linearisation) of each class.
print("Child2.mro():{}".format(Child2.mro()))
print("Child1.mro:{}".format(Child1.mro()))
| [
"satyamkumarsinha9@gmail.com"
] | satyamkumarsinha9@gmail.com |
0d689174ed920a61e2ae5b29e1be0b018ef5fb2d | 4676ab33b20fb9206e12bee6c12dc01d2331effb | /main.spec | 692a47201ba61b89d9274edac73300215ef5d923 | [] | no_license | acer-king/Goal-detection | 94cbf531adbbdbc182dd123fe5f1d9b4d73f112d | a138cd153acf569c73577d03dbf91a511aee1bb9 | refs/heads/master | 2022-12-03T03:35:50.762543 | 2020-08-17T17:36:43 | 2020-08-17T17:36:43 | 288,245,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | spec | # -*- mode: python ; coding: utf-8 -*-
# PyInstaller build specification: bundles main.py into an executable.
block_cipher = None  # cipher=None below: bytecode archive is not encrypted.
# Analysis step: discover the script's imports and data dependencies.
a = Analysis(['main.py'],
             pathex=['D:\\upwork\\indian\\detecting goal'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Archive of the pure-Python modules found by the analysis step.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# Final executable named 'main': console mode, UPX compression enabled,
# with scripts, binaries and data files all packed in.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='main',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=True )
| [
"acerking03@gmail.com"
] | acerking03@gmail.com |
857cffe6d12559c9007f01ee6dc6f8354189063e | 93c55344aa8e527600a906e118b7db45f7e3f208 | /10-matplotlib/labels.py | 0913636d7521c7a1e972838cbaa630fa8be5ec32 | [] | no_license | skolakoda/ucimo-data-nauku | 5b6642894967bc5f460b6d509a826bacfc66928c | 0e1530d5654b49706e1a34b38e288cf972ad23fa | refs/heads/master | 2021-09-19T18:33:45.194310 | 2018-07-30T13:08:56 | 2018-07-30T13:08:56 | 114,565,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | import matplotlib.pyplot as plt
# Sample years and the corresponding population values (presumably in
# billions, matching the 'World Population Projections' title -- confirm).
year = [1950, 1951, 1952, 2100]
pop = [2.538, 2.57, 2.62, 10.85]
plt.plot(year, pop)
# Axis labels and chart title.
plt.xlabel('Year')
plt.ylabel('Population')
plt.title('World Population Projections')
plt.show()
| [
"mudroljub@gmail.com"
] | mudroljub@gmail.com |
2ba4b6eb69fd971459ad3d9a0373b59660585f26 | 8b1e05154bdbe7aa595310c6cc5ab7ec84be88d5 | /contrib/zmq/zmq_sub.py | d1e7d5ca10f658bb882adb7e6afca5d704fa7a20 | [
"MIT",
"LicenseRef-scancode-free-unknown"
] | permissive | AustraliaCash/AustraliaCash-Core | c2ab806db3c123c41882aacd1af23109cce6b4db | 5e31845eea27eecd06135ddd873d4f37fba9ee60 | refs/heads/master | 2023-07-09T20:26:35.387036 | 2023-06-26T20:27:24 | 2023-06-26T20:27:24 | 157,548,102 | 9 | 6 | MIT | 2023-06-26T20:27:25 | 2018-11-14T12:50:16 | C++ | UTF-8 | Python | false | false | 3,406 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2021 The AustraliaCash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
AustraliaCash should be started with the command line arguments:
bitcoind -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332 \
-zmqpubsequence=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
# Bail out early on interpreters without async/await support (added in 3.5).
if (sys.version_info.major, sys.version_info.minor) < (3, 5):
    print("This example only works with Python 3.5 and greater")
    sys.exit(1)
# Must match the -zmqpub* endpoints shown in the module docstring.
port = 28332
class ZMQHandler():
    """Subscribes to all five ZMQ notification topics on the local node and
    prints each message as it arrives, using an asyncio receive loop.
    """
    def __init__(self):
        self.loop = asyncio.get_event_loop()
        self.zmqContext = zmq.asyncio.Context()
        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        # RCVHWM=0: no receive high-water mark, so notifications are not
        # dropped on the subscriber side.
        self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0)
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "sequence")
        self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
    async def handle(self) :
        """Receive one multipart message, print it, then reschedule itself."""
        topic, body, seq = await self.zmqSubSocket.recv_multipart()
        sequence = "Unknown"
        if len(seq) == 4:
            # Per-topic message counter: a little-endian uint32.
            sequence = str(struct.unpack('<I', seq)[-1])
        if topic == b"hashblock":
            print('- HASH BLOCK ('+sequence+') -')
            print(body.hex())
        elif topic == b"hashtx":
            print('- HASH TX ('+sequence+') -')
            print(body.hex())
        elif topic == b"rawblock":
            # Only the first 80 bytes (the block header) are printed.
            print('- RAW BLOCK HEADER ('+sequence+') -')
            print(body[:80].hex())
        elif topic == b"rawtx":
            print('- RAW TX ('+sequence+') -')
            print(body.hex())
        elif topic == b"sequence":
            # Layout: 32-byte hash, 1-byte event label, optional 8-byte
            # mempool sequence number.
            hash = body[:32].hex()
            label = chr(body[32])
            mempool_sequence = None if len(body) != 32+1+8 else struct.unpack("<Q", body[32+1:])[0]
            print('- SEQUENCE ('+sequence+') -')
            print(hash, label, mempool_sequence)
        # schedule ourselves to receive the next message
        asyncio.ensure_future(self.handle())
    def start(self):
        """Install the SIGINT handler, queue the first receive and run forever."""
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        self.loop.create_task(self.handle())
        self.loop.run_forever()
    def stop(self):
        """Stop the event loop and tear down the ZMQ context."""
        self.loop.stop()
        self.zmqContext.destroy()
# Run the subscriber until interrupted (SIGINT triggers ZMQHandler.stop()).
daemon = ZMQHandler()
daemon.start()
| [
"31876349+farsider350@users.noreply.github.com"
] | 31876349+farsider350@users.noreply.github.com |
9c770fd102ad3cc35dacc09a06de543cc152e717 | 8c9e166fdbc213c8a9121bef3cac962cb94a5cc3 | /Lab 9 28Sep/lab_ise_gui/lab_ise_gui/calculator.py | 826deb28faad56102736e28fdca1d2c67325d76a | [] | no_license | nesarasr/Information-Systems-Lab | e7e5d94c5da3163376dc08ef2486c0eb88db90e8 | 6f86f6095228ab937247abe36db7b28fe2fe572c | refs/heads/master | 2022-04-12T00:25:01.904127 | 2020-03-28T10:50:53 | 2020-03-28T10:50:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 26 15:42:07 2019
@author: Satyajit
"""
from PyQt5 import QtGui , uic ,QtWidgets , QtCore
import pyqtgraph
import sys
import numpy as np
from functools import partial
def exit_func():
    """Quit handler for the Exit button: terminate with exit status 0."""
    raise SystemExit(0)
if __name__=='__main__':
    # NOTE(review): 'a' is never used -- looks like a leftover accumulator
    # for the (commented-out) digit buttons; confirm before removing.
    a=str()
    # Qt application plus the window loaded from the Designer .ui file.
    app=QtWidgets.QApplication(sys.argv)
    dlg=uic.loadUi("calculator.ui") #Main Window
    dlg.setWindowTitle('calculator')
    # Digit-button handlers kept for reference but not wired up yet:
#    dlg.one.clicked.connect(partial(on_tab,'1'))
#    dlg.two.clicked.connect(partial(on_tab,'2'))
#    dlg.three.clicked.connect(partial(on_tab,'3'))
#    dlg.four.clicked.connect(partial(on_tab,'4'))
#    dlg.five.clicked.connect(partial(on_tab,'5'))
#    dlg.six.clicked.connect(partial(on_tab,'6'))
#    dlg.seven.clicked.connect(partial(on_tab,'7'))
#    dlg.eight.clicked.connect(partial(on_tab,'8'))
#    dlg.nine.clicked.connect(partial(on_tab,'9'))
#    dlg.zero.clicked.connect(partial(on_tab,'0'))
    # Only the Exit button is connected.
    dlg.exit.clicked.connect(exit_func)
    dlg.show()
app.exec_() | [
"noreply@github.com"
] | noreply@github.com |
4e98cba026ffbfa488d602586ed1fb56b70a4b3e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/117/usersdata/168/26292/submittedfiles/al2.py | 2636de789a2d58357263296f2d1affdab045e0ff | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | from __future__ import division
# Split a real number into its integer and fractional parts and print both.
n = float(input('Digite n'))
# Integer part, truncated toward zero.
n1 = int(n)
# Fractional part.  Bug fix: the original read `n2 = n - int`, which subtracts
# the built-in `int` type object itself and raises a TypeError at runtime.
n2 = n - n1
print('%.2f' % n1)
print('%.2f' % n2)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
1872c2b02787510ea089a882647d262201237e43 | e7f708af4b599ec6763e0d3b311e2cb47cc155d8 | /payments/admin.py | f69953885eba02d24436b82c8477468a8e0d0cfd | [] | no_license | dmontoya1/tu-licencia | d48bc8779d8cda50c7a382cb1c14e2ae3668ebc8 | d436d665ba797d7b90fcdcc58bcef3e79b917682 | refs/heads/master | 2023-05-14T16:38:52.408066 | 2020-06-08T20:24:39 | 2020-06-08T20:24:39 | 371,433,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# from .models import Invoice
# class InvoiceAdmin(admin.ModelAdmin):
# list_display = ('__unicode__', 'user', 'release_date', 'is_discharged', 'payment_status')
# readonly_fields = ('release_date', )
# search_fields = ('release_date', 'payu_reference_code')
# admin.site.register(Invoice, InvoiceAdmin) | [
"dmontoya@apptitud.com.co"
] | dmontoya@apptitud.com.co |
9eeafd4847d7b39c112746a6e759becc2bf0aef7 | f17c4d393617f0b2d4303b24e59f7fa867b6e483 | /migrations/versions/a0ef55bfc5a9_.py | c2bdd3f0b49e65ba44c8d12f0b696d8d3648a802 | [] | no_license | kodespy/kapes | 611de2451ddc7f930c6075746375640aedb656ab | 95119d89290422efccf72e565524b75742645778 | refs/heads/master | 2021-01-22T17:48:12.899716 | 2017-08-18T17:39:46 | 2017-08-18T17:39:46 | 100,735,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,503 | py | """empty message
Revision ID: a0ef55bfc5a9
Revises: None
Create Date: 2017-08-18 08:12:53.661047
"""
# revision identifiers, used by Alembic.
revision = 'a0ef55bfc5a9'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: create the ``socios`` (members) and ``aportes``
    (contributions) tables; ``aportes.idsocio`` references ``socios.id``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('socios',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('documento', sa.Integer(), nullable=True),
    sa.Column('nombres', sa.String(), nullable=True),
    sa.Column('celular', sa.String(), nullable=True),
    sa.Column('club', sa.String(), nullable=True),
    sa.Column('admin', sa.Boolean(), nullable=True),
    sa.Column('fecha', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('documento')
    )
    op.create_table('aportes',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('idsocio', sa.Integer(), nullable=True),
    sa.Column('monto', sa.Integer(), nullable=True),
    sa.Column('mes', sa.Integer(), nullable=True),
    sa.Column('fecha', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
    sa.ForeignKeyConstraint(['idsocio'], ['socios.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop ``aportes`` first (it holds the foreign key
    to ``socios``), then ``socios``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('aportes')
    op.drop_table('socios')
    # ### end Alembic commands ###
| [
"kodespy@gmail.com"
] | kodespy@gmail.com |
8d33dfc0c8f80b0f4cd63f78511920a1f5adc4ab | ca0b950d4232132c340c75513809fa4f72c52b0e | /animations/color/base.py | 219dfc936d7e23b3c1ae58d7bc9b918f5d57462e | [
"MIT"
] | permissive | LeLuxNet/GridPy | e32cebd157823669b001499263e1788993c0cf13 | 5f4d02d2b254be1f0682b724a96a99009a415308 | refs/heads/master | 2022-12-21T08:20:03.913594 | 2020-09-23T18:31:44 | 2020-09-23T18:31:44 | 260,533,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | class ColorGeneration:
    def generate(self):
        """Produce a single color.  The base implementation yields None;
        subclasses override this (see IndexColorGeneration below)."""
        return None
def generate_list(self, amount):
colors = []
for i in range(amount):
colors.append(self.generate())
return colors
class IndexColorGeneration(ColorGeneration):
    """Generator that walks an index from 0 up to max_index - 1, wrapping
    back to 0 after the last index.

    Subclasses implement generate_index() to map each index to a color.
    """

    def __init__(self, max_index):
        # Number of distinct indices before the cycle restarts at 0.
        self.max_index = max_index
        self.index = 0

    def generate(self):
        """Return the color for the current index, then advance (with wraparound)."""
        result = self.generate_index(self.index)
        advanced = self.index + 1
        self.index = 0 if advanced >= self.max_index else advanced
        return result

    def generate_index(self, index):
        """Map an index to a color; the base implementation yields None."""
        return None
| [
"ginnythecat@lelux.net"
] | ginnythecat@lelux.net |
dd0f144786bc13cb2bcfd0f6f6dc726d47af6982 | 4100e7b9349b611c1922b37891355c46ef376f1e | /Rivet/script_and_example/fix_e-_yodas.py | 36cba4143c4aa22296611e862ef088107bda2a64 | [] | no_license | kdlong/LesHouchesVBSstudies | 8bc0c888056a232e3ea9371cccdd300d0a9f314c | 5ee351546520896935a2e53480034c40def668fc | refs/heads/master | 2021-03-16T09:39:05.363696 | 2019-05-13T03:10:15 | 2019-05-13T03:10:15 | 113,029,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | import sys
# Python 2 script (note the print statement below).  Copies a YODA analysis
# file, dropping every 'WmZ_OF' histogram whose 'Total' entry count is below
# 1000; the result is written to 'fixed_<input>'.
analysisfile = open(sys.argv[1],'r')
new_analysisfile = open("fixed_"+sys.argv[1],'w')
# Buffer holding the header lines of the histogram currently being scanned.
prestring=""
# State flags for the line-by-line scan.
found_histo=False
header=False
wrong_histo=False
print "Start adapting the histogram {0}.".format(sys.argv[1])
for line in analysisfile:
    if line.find('END YODA_HISTO1D') != -1:
        # NOTE(review): str.strip() returns a new string; this call is a no-op.
        line.strip('\n')
        if not wrong_histo:
            new_analysisfile.write(line)
        # Reset state for the next histogram block.
        found_histo = False
        header = False
        wrong_histo = False
        prestring = ""
        continue
    if line.find('BEGIN YODA_HISTO1D') != -1 and line.find('WmZ_OF') != -1:
        found_histo = True
        header = True
        prestring+=line
        continue
    if (found_histo and line.find('Total') != -1):
        header=False
        # NOTE(review): another no-op strip; 'line' is unchanged.
        line.strip('\n')
        numEvts=line.split()
        # Keep the histogram only if its total entry count is large enough;
        # otherwise mark it so its remaining lines are skipped.
        if float(numEvts[-1]) >= 1000.:
            # NOTE(review): no-op strip; the buffered header is written as-is.
            prestring.strip('\n')
            new_analysisfile.write(prestring)
        else:
            wrong_histo=True
    if (found_histo and header):
        prestring+=line
        continue
    if (found_histo and wrong_histo):
        continue
    # NOTE(review): no-op strip; the line is written with its newline intact.
    line.strip('\n');
    new_analysisfile.write(line)
analysisfile.close()
new_analysisfile.close()
| [
"stephan.braeuer@stud.uni-goettingen.de"
] | stephan.braeuer@stud.uni-goettingen.de |
015f94220909b436deb31345160eebc80132c586 | 3ab1f37b4372d0796c85ef24343dd8c03accb6ef | /OddEvenLinkedList.py | 5004fdb3d7cf1ccbd577d641dc11c9e1fe6a488c | [] | no_license | Blossomyyh/leetcode | 2be6a99534801fc59fe9551317ca49c3704b1c3d | 38615779eb43d147587467e11dc22761ac0726cb | refs/heads/master | 2023-01-22T16:56:26.624677 | 2020-11-20T13:47:43 | 2020-11-20T13:47:43 | 266,845,278 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # Definition for singly-linked list.
class ListNode:
    """A single node of a singly linked list."""
    def __init__(self, val=0, next=None):
        # Payload, and the reference to the successor node (None at the tail).
        self.val, self.next = val, next
class Solution:
    def oddEvenList(self, head: ListNode) -> ListNode:
        """Rearrange the list so the odd-indexed nodes (1-based) come first,
        followed by the even-indexed nodes, preserving relative order.

        Runs in O(n) time with O(1) extra space; returns the same head.
        """
        # Lists of length 0 or 1 are already in odd/even order.
        if head is None or head.next is None:
            return head
        odd_tail = head
        even_tail = even_head = head.next
        # Re-link every other node, advancing both sublists in lockstep.
        while odd_tail and odd_tail.next and even_tail and even_tail.next:
            odd_tail.next = odd_tail.next.next
            even_tail.next = even_tail.next.next
            odd_tail = odd_tail.next
            even_tail = even_tail.next
        # Append the even sublist after the last odd node.
        odd_tail.next = even_head
        return head
# Demo: reorder the 4-node list 1->2->3->4 into odd-index-first order.
node = ListNode(1, ListNode(2, ListNode(3, ListNode(4))))
Solution().oddEvenList(node)
| [
"blossomyyh@163.com"
] | blossomyyh@163.com |
d50572d4090c707e69adf085200b9658c585e677 | d9803bd42d8edc8fd089b89588fdd2033cfb3fb0 | /GISJob/items.py | f6727c91947caf098c54d563ca06722592793253 | [] | no_license | pengshaowei/GISJob | d718a66a1dacbc5dbac1084594cb572205d0d457 | 8d94f1fe25d41880d643923233b2569dfc7bd91e | refs/heads/master | 2020-03-27T06:15:38.437874 | 2016-06-29T06:43:08 | 2016-06-29T06:43:08 | 62,027,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class GisjobItem(scrapy.Item):
    """Scrapy item describing one scraped GIS job posting."""
    url = scrapy.Field()
    job = scrapy.Field()  # job title
    location = scrapy.Field()  # work location
    salary = scrapy.Field()  # salary
    ins = scrapy.Field()  # company name
    instype = scrapy.Field()  # company type
    jingyan = scrapy.Field()  # required work experience
    xueli = scrapy.Field()  # required education level
    recruitnum = scrapy.Field()  # number of openings
    releasetime = scrapy.Field()  # posting release date
    language = scrapy.Field()  # language skill requirements
    zhuanye = scrapy.Field()  # required major / specialty
    labels = scrapy.Field()  # job benefit / perk tags
    jobinfo = scrapy.Field()  # full job description
    contactinfo = scrapy.Field()  # contact information
    insinfo = scrapy.Field()  # company information
| [
"www.pengshaoweip@qq.com"
] | www.pengshaoweip@qq.com |
ca30c61d529defb6e21e782cba161535f29b1a71 | c538092a01585082ecfdf8845886faea3985f067 | /duckietown_utils/rllib_callbacks.py | 85090a0f568738d85d21c62e0f49e1f851cce444 | [
"MIT"
] | permissive | BoAi01/Duckietown-RL | ef3fdd8205e499b1a68d592fed0934a8c9cba3e7 | 434787a1caf1e5a8ec1d45a30c1ec8eb35ff91ba | refs/heads/master | 2023-04-11T02:15:32.067588 | 2021-04-23T15:30:33 | 2021-04-23T15:30:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,969 | py | """
RLlib callbacks used to log custom metrics and a hack to save trajectory data.
"""
__license__ = "MIT"
__copyright__ = "Copyright (c) 2020 András Kalapos"
import logging
import numpy as np
import ray.rllib as rllib
from ray.tune.logger import pretty_print
from ray.tune.result import TRAINING_ITERATION, TIMESTEPS_TOTAL
logger = logging.getLogger(__name__)
from .trajectory_plot import correct_gym_duckietown_coordinates
from .env import resolve_multimap_name
from .wrappers.simulator_mod_wrappers import ObstacleSpawningWrapper
# The actions returned by the rllib agent are not clipped to be within action space bounds, thus they can be arbitrarily
# large. To make the histograms correct the actions should be clipped to the action space bound.
# ACTION_HISTOGRAM_LIMITS should hold the upper and lower bound of the action space.
ACTION_HISTOGRAM_LIMITS = [-1., 1.]
def on_episode_start(info):
    """Reset the per-episode accumulators used by the other callbacks.

    If adding new histograms, don't forget to edit on_train_result below
    (to prevent data accumulation over iterations).
    """
    # info-keys: 'env', 'policy', 'episode'
    episode = info['episode']
    # Per-step lists, filled by on_episode_step().
    for key in ('robot_speed', 'robot_cur_pos', 'deviation_centerline',
                'deviation_heading', 'distance_travelled',
                'distance_travelled_any', 'proximity_penalty'):
        episode.user_data[key] = []
    # Count of steps where the simulator reported a negative proximity penalty.
    episode.user_data['collision_risk_step_cnt'] = 0
    for key in ('reward_orientation', 'reward_velocity',
                'reward_collision_avoidance'):
        episode.user_data[key] = []
    # Custom histogram data
    # episode.hist_data['action_prob'] = []
    episode.hist_data['sampled_actions'] = []
    episode.hist_data['_robot_coordinates'] = []
def on_episode_step(info):
    """Record one step's worth of data (sampled action, speed, lane deviation,
    distance travelled and reward components) into the accumulators that were
    initialised by on_episode_start().
    """
    episode = info['episode']  # type: rllib.evaluation.episode.MultiAgentEpisode
    # info-keys: 'env', 'episode'
    # Clip to the action-space bounds so the histogram limits are respected.
    episode.hist_data['sampled_actions'].append(np.clip(episode.last_action_for(),
                                                        ACTION_HISTOGRAM_LIMITS[0], ACTION_HISTOGRAM_LIMITS[1]))
    env_info = episode.last_info_for()
    # Example env_info structure, for reference:
    # {'Simulator': {'action': [array([0.96753883], dtype=float32), array([1.], dtype=float32)],
    #                'lane_position': {'dist': -0.09179686463148151,
    #                                  'dot_dir': 0.9997813004067312,
    #                                  'angle_deg': 1.1983109648377053,
    #                                  'angle_rad': 0.020914471799167954},
    #                'robot_speed': 0.0,
    #                'proximity_penalty': 0,
    #                'cur_pos': [3.859709301028824, 0.0, 4.362296864631481],
    #                'cur_angle': 3.1206781817906233,
    #                'wheel_velocities': [array([1.1610466], dtype=float32), array([1.2], dtype=float32)],
    #                'timestamp': 0.03333333333333333,
    #                'tile_coords': [6, 7],
    #                'msg': ''}}
    if env_info is not None:
        episode.user_data['robot_speed'].append(env_info['Simulator']['robot_speed'])
        episode.user_data['proximity_penalty'].append(env_info['Simulator']['proximity_penalty'])
        # A negative penalty indicates the robot is close to an obstacle.
        if env_info['Simulator']['proximity_penalty'] < 0.:
            episode.user_data['collision_risk_step_cnt'] += 1
        episode.user_data['reward_orientation'].append(env_info.get('custom_rewards', {}).get('orientation', 0.))
        episode.user_data['reward_velocity'].append(env_info.get('custom_rewards', {}).get('velocity', 0.))
        episode.user_data['reward_collision_avoidance'].append(env_info.get('custom_rewards', {}).get('collision_avoidance', 0.))
        # If the robot is "not in a lane", the lane position key is not added to the simulator info dictionary
        # see gym_duckietown.simulator.Simulator.get_agent_info() (line 1318)
        if 'lane_position' in env_info['Simulator'].keys():
            episode.user_data['deviation_centerline'].append(abs(env_info['Simulator']['lane_position']['dist']))
            episode.user_data['deviation_heading'].append(abs(env_info['Simulator']['lane_position']['angle_deg']))
        cur_pos = env_info['Simulator']['cur_pos']
        sim = info['env'].get_unwrapped()[0].unwrapped
        corrected_cur_pos = correct_gym_duckietown_coordinates(sim, cur_pos)
        episode.user_data['robot_cur_pos'].append(corrected_cur_pos)
        dist_travelled = 0.  # Distance traveled in the correct right side lane
        dist_travelled_any = 0.  # Distance traveled anywhere on the road
        if 'lane_position' in env_info['Simulator'].keys():
            if len(episode.user_data['robot_cur_pos']) > 1:
                # Euclidean distance between the last two recorded positions.
                dist_travelled_any = np.linalg.norm(episode.user_data['robot_cur_pos'][-1] -
                                                    episode.user_data['robot_cur_pos'][-2], ord=2)
                if env_info['Simulator']['lane_position']['dist'] > -0.1:
                    # driving in the correct lane
                    dist_travelled = dist_travelled_any
        episode.user_data['distance_travelled'].append(dist_travelled)
        episode.user_data['distance_travelled_any'].append(dist_travelled_any)
    # try:
    #     policy_info = episode.last_pi_info_for()
    #     episode.hist_data['action_prob'].append(policy_info['action_prob'])
    # except KeyError as err:
    #     logger.warning("KeyError {}".format(err))
def on_episode_end(info):
    """Aggregate the per-step data collected during the episode into
    episode-level custom metrics, and stash the trajectory for the loggers.
    """
    # info-keys: 'env', 'policy', 'episode'
    episode = info['episode']
    data = episode.user_data
    # (metric name, user_data key, reduction); None keeps the raw value.
    metric_spec = [
        ('mean_robot_speed', 'robot_speed', np.mean),
        ('deviation_centerline', 'deviation_centerline', np.mean),
        ('deviation_heading', 'deviation_heading', np.mean),
        ('distance_travelled', 'distance_travelled', np.sum),
        ('distance_travelled_any', 'distance_travelled_any', np.sum),
        ('proximity_penalty', 'proximity_penalty', np.sum),
        ('collision_risk_step_cnt', 'collision_risk_step_cnt', None),
        ('reward_orientation', 'reward_orientation', np.sum),
        ('reward_velocity', 'reward_velocity', np.sum),
        ('reward_collision_avoidance', 'reward_collision_avoidance', np.sum),
    ]
    for metric_name, data_key, reduction in metric_spec:
        raw = data[data_key]
        episode.custom_metrics[metric_name] = raw if reduction is None else reduction(raw)
    # Robot coordinates are not real histogram data: hist_data is simply the
    # only channel through which the per-episode trajectory reaches the
    # loggers that produce the trajectory plots.
    episode.hist_data['_robot_coordinates'].append(data['robot_cur_pos'])
def on_train_result(result):
    """Trim the accumulated histogram data down to the current iteration.

    Histogram stats are accumulated over iterations, so data from earlier
    iterations would otherwise shape this iteration's histograms.  Both the
    custom histograms and RLlib's built-in ones are truncated!!!
    """
    stats = result['result']
    hist_stats = stats['hist_stats']
    n_episodes = stats['episodes_this_iter']
    n_timesteps = stats['timesteps_this_iter']
    # One entry per timestep.
    hist_stats['sampled_actions'] = hist_stats['sampled_actions'][:n_timesteps]
    # One entry per episode: the custom trajectory channel plus the built-ins.
    for key in ('_robot_coordinates', 'episode_lengths', 'episode_reward'):
        hist_stats[key] = hist_stats[key][:n_episodes]
    # curriculum_apply_update(result)
def enable_obstacles(env):
    """Turn on obstacle spawning for one (possibly wrapped) environment.

    Walks down the wrapper chain until the ObstacleSpawningWrapper is found
    and flips its 'spawn_obstacles' config flag.
    NOTE(review): raises AttributeError if no ObstacleSpawningWrapper is
    present in the chain -- confirm every training env is wrapped with it.
    """
    semi_unwrapped = env
    while not isinstance(semi_unwrapped, ObstacleSpawningWrapper):
        semi_unwrapped = semi_unwrapped.env
    semi_unwrapped.env_config['spawn_obstacles']=True
def curriculum_apply_update(result):
    """Curriculum step: once total training timesteps pass 500k (checked
    inside a 50k-step window so it fires roughly once), enable obstacle
    spawning in every rollout environment of every worker.
    """
    timesteps_total = result['result'].get('timesteps_total')
    if timesteps_total > 500.e+3 and timesteps_total < 550.e+3:
        trainer = result["trainer"]
        # Alternative: call trainer._make_workers ?
        trainer.workers.foreach_worker_with_index(
            lambda worker, index: worker.foreach_env(lambda env: enable_obstacles(env)))
        logger.warning("Obstacle spawning enabled at timestep {}".format(timesteps_total))
| [
"kalapos.andras@gmail.com"
] | kalapos.andras@gmail.com |
class Foo(object):
    """Callable object that counts how many times any instance was invoked."""

    counter = 0  # class-level tally shared by every instance

    def __call__(self):
        Foo.counter += 1
        return Foo.counter


# Demo: three calls on one instance bump the shared counter to 3.
counter_obj = Foo()
call_result = counter_obj()
call_result = counter_obj()
call_result = counter_obj()
print("Object Called : ", call_result)
"daxita2013@gmail.com"
] | daxita2013@gmail.com |
7246a717056fa85a8c381a15a40d9fff59077e68 | 9e4fc18f267f49e1efeb69f2e95e4c4a00ce8ce6 | /Python Practice/3.1-2_FizzBuzz.py | 8a04e6282dd558c98d016cb2446bc1d3ec41931a | [] | no_license | cookiewho/IPS_Workshop_2020 | 1de50e80abbedafeb48f7907ffca8461d2d2bd4a | 4dda2febb7dfa6645042e8e7a6d1138465ad12cf | refs/heads/master | 2022-12-25T02:59:16.436013 | 2020-10-09T17:31:31 | 2020-10-09T17:31:31 | 284,307,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | '''
Given an integer, print "Fizz" if it's divisible by 3, print "Buzz"
if divisible by 5, "FizzBuzz" if divisible by both 3 & 5, "Not fizz
or buzz" otherwise (without the quotes).
'''
a = int(input())
# Collect the words whose divisor divides the input; joining them yields
# "Fizz", "Buzz", or "FizzBuzz" in the combined case.
words = [word for divisor, word in ((3, "Fizz"), (5, "Buzz")) if a % divisor == 0]
if words:
    print("".join(words))
else:
    print("Not fizz or buzz")
"dinkwho@gmail.com"
] | dinkwho@gmail.com |
a0ab444893ad20b5e907d2942d6ab465c68323c3 | 053441eba6ecf082d48f76807ad097e89d31b94c | /paddlevideo/modeling/losses/cross_entropy_loss.py | 325ee35b569b2d542b6fe215f04f4338fb9adab6 | [
"Apache-2.0"
] | permissive | shippingwang/PaddleVideo | 9cf4ff7c8b7f9c60fe6309c4a719698bfdba00b5 | 48e6bb5f67ad44f7ef3c5cd683e8e7b8c50f0918 | refs/heads/main | 2023-03-17T10:38:39.257643 | 2020-12-01T06:19:41 | 2020-12-01T06:19:41 | 312,490,608 | 1 | 0 | Apache-2.0 | 2020-11-13T06:21:55 | 2020-11-13T06:21:54 | null | UTF-8 | Python | false | false | 1,293 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from ..registry import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register()
class CrossEntropyLoss(BaseWeightedLoss):
    """Standard cross entropy loss, weighted through BaseWeightedLoss."""

    def _forward(self, score, labels, **kwargs):
        """Compute cross entropy between predictions and ground truth.

        Args:
            score (paddle.Tensor): The class score.
            labels (paddle.Tensor): The ground truth labels.
            kwargs: Extra keyword arguments forwarded verbatim to
                ``paddle.nn.functional.cross_entropy``.

        Returns:
            paddle.Tensor: The cross entropy loss value.
        """
        return F.cross_entropy(score, labels, **kwargs)
| [
"shipeng1108@163.com"
] | shipeng1108@163.com |
import numpy as np
import peakutils
import plotly.plotly as py
import plotly.graph_objs as go
from matplotlib import pyplot as plt
from scipy.fftpack import fft, fftfreq
from scipy.io import wavfile
from scipy.signal import butter, lfilter, freqz
# Demo: valid-mode cross correlation of a short template against a periodic
# signal, computed by hand and compared with numpy's built-in, followed by
# peak detection and an (online) plotly rendering.
x = np.array([1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
y = np.array([1, 2, 3, 4, 5])
length1 = len(x)
length2 = len(y)

# Number of template repetitions expected in the signal.
period = length1 / length2
print(period)


def _sliding_dot(signal, template):
    """Valid-mode cross correlation: every dot product of *template* slid
    along *signal*, returned as plain ints."""
    window = len(template)
    return [
        int(sum(signal[offset + j] * template[j] for j in range(window)))
        for offset in range(len(signal) - window + 1)
    ]


# Hand-rolled cross correlation (the same sliding loop was previously
# duplicated three times with manual while/break bookkeeping).
r = _sliding_dot(x, y)
plt.figure(figsize=(30, 10))
plt.plot(r, 'b-', )
plt.title("Cross Correlation 2")
plt.grid()

# Reference result from numpy for visual comparison.
z = np.correlate(x, y, mode='valid')
plt.figure(figsize=(30, 10))
plt.plot(z, 'g-', )
plt.title("Cross Correlation ")
plt.grid()

# Normalise by the peak value so the maximum correlation equals 1.0.
maxm = max(r)
r2 = [value / maxm for value in r]
for value in r2:
    print(value)

plt.figure(figsize=(30, 10))
plt.plot(r2, 'b-', )
plt.title("Maximised Cross Correlation 2")
plt.grid()

# Peaks above 70% of the maximum; min_dist is measured in samples.
indices = peakutils.indexes(r2, thres=0.70, min_dist=0.1)
# NOTE: `go` comes from `import plotly.graph_objs as go` at the top of the
# file; it was referenced here without ever being imported (NameError).
trace = go.Scatter(
    x=[j for j in range(len(r2))],
    y=r2,
    mode='lines',
    name='Original Plot'
)
trace2 = go.Scatter(
    x=indices,
    y=[r2[j] for j in indices],
    mode='markers',
    marker=dict(
        size=8,
        color='rgb(255,0,0)',
        symbol='cross'
    ),
    name='Detected Peaks'
)
data = [trace, trace2]
py.iplot(data, filename='milk-production-plot-with-higher-peaks')
plt.show()
| [
"agc19shubham@gmail.com"
] | agc19shubham@gmail.com |
aed1f7d9aa11a49f7656442d5d5040bfb80df6b3 | 262655fd203a91cd533f8ecec893f821de8b4c84 | /src/features/build_features.py | bb82e8218c19928ffb8372d7d40a17c3661164f8 | [
"MIT"
] | permissive | GeorgesAlkhouri/bikeshare | a73bcddf30a8f68f4db4a0fe3368c81e3bf70d4c | 8dfd83bc062d2c557d5d14f303b3377de9df4353 | refs/heads/master | 2020-11-25T00:06:15.590043 | 2020-01-08T12:43:39 | 2020-01-08T12:43:39 | 228,397,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,662 | py | def cast_column_type(df):
category_features = ['season', 'holiday', 'mnth', 'hr',
'weekday', 'workingday', 'weathersit']
df[category_features] = df[category_features].astype('category')
return df
def drop_columns(df):
    """Return *df* without the raw date string and row-index columns."""
    return df.drop(columns=['dteday', 'instant'])
def build_spark_pipeline(df, **kwargs):
    """Assemble a Spark ML pipeline: one-hot encoding, feature-vector
    assembly, min-max scaling and a gradient-boosted-trees regressor.

    Showcase implementation only — the pipeline is returned unfitted and no
    feature matrix / target column is extracted (first two return values are
    None).
    """
    from pyspark.ml import Pipeline
    from pyspark.ml.feature import OneHotEncoderEstimator
    from pyspark.ml.feature import MinMaxScaler
    from pyspark.ml.feature import VectorAssembler
    from pyspark.ml.regression import GBTRegressor

    encoder = OneHotEncoderEstimator(
        inputCols=['season', 'yr', 'mnth'],
        outputCols=['season_final', 'yr_final', 'mnth_final'])
    assembler = VectorAssembler(
        inputCols=['temp', 'season_final', 'yr_final', 'mnth_final'],
        outputCol='features')
    scaler = MinMaxScaler(inputCol='features', outputCol='final_features')
    regressor = GBTRegressor(featuresCol='final_features', maxIter=10)

    stages = [encoder, assembler, scaler, regressor]
    return None, None, Pipeline(stages=stages)
def build_sklearn_pipline(df, **kwargs):
    """Build the sklearn prototype pipeline used in the Jupyter Notebook.

    Scales the selected feature columns into [0, 1], drops all other columns,
    and feeds the result to a GradientBoostingRegressor. Returns
    (X, y, pipeline) with the pipeline still unfitted.

    Note: data preprocessing is done in the notebook and was not ported back
    into the project code for time reasons.
    """
    from sklearn.pipeline import make_pipeline
    from sklearn.compose import ColumnTransformer
    from sklearn.preprocessing import MinMaxScaler
    from sklearn.ensemble import GradientBoostingRegressor

    scaled_columns = ['temp', 'hum', 'windspeed',
                      'season', 'yr', 'mnth', 'hr',
                      'holiday', 'weekday', 'workingday', 'weathersit']
    preprocessor = ColumnTransformer(
        transformers=[('min_max_features', MinMaxScaler(), scaled_columns)],
        remainder='drop')
    model = GradientBoostingRegressor()

    pipeline = make_pipeline(preprocessor, model)
    pipeline.set_params(gradientboostingregressor__n_estimators=4000,
                        gradientboostingregressor__alpha=0.01)

    target = df['cnt']
    features = df.drop(['cnt', 'casual', 'registered'], axis=1)
    return features, target, pipeline
| [
"alkhouri@informatik.uni-leipzig.de"
] | alkhouri@informatik.uni-leipzig.de |
a9ea1d708b1afa66b221cc8119ea3d36a1a87fed | 75669eaa1f249beefa8f0b23beb1edbe72e6e92f | /function_practice.py | 9e0e5765e9a9fd8abaa21abf8ce619b4a019289f | [] | no_license | faryalj/HighSchoolCamp | 0859d70e6050a1d15185c2adccc7d601057b246d | f30fa2b05982cf8a4fbed71901f6f9bac8d2d25c | refs/heads/master | 2020-06-03T14:22:32.632876 | 2019-06-14T19:21:31 | 2019-06-14T19:21:31 | 191,602,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | """
title: function_practice
author: Faryal
date: 2019-06-12 11:22
"""
# Age_calculator
def age_calculator(current_date, birth_year):
    """Years elapsed between the birth year and the current year."""
    return current_date - birth_year


current_date = int(input("What year is it?"))
birth_year = int(input("When were you born?"))
print(f"You are {age_calculator(current_date, birth_year)} years old.")


# Averaging_numbers
def avg_number(x, y, z):
    """Arithmetic mean of three numbers."""
    return (x + y + z) / 3


x = int(input("x:"))
y = int(input("y:"))
z = int(input("z:"))
print(avg_number(x, y, z))
"faryalmjabbar@gmail.com"
] | faryalmjabbar@gmail.com |
db1dce711edcbeb35ad6b34bcd858cc7e5978198 | ffb6a79786b1446a2f1ed66e44f9d2225b2eb463 | /calculate_hours_google.py | 78a61e392b3adc1db07ab6fb7d5fdc042fc3ca76 | [] | no_license | rborgwald/calculate-hours | eeb15d5c47fca2a81ac2ca1766b96843f2ec57f9 | a58bd7e5237600e99c372fe332290586801ccf39 | refs/heads/master | 2020-03-14T03:51:57.788622 | 2018-05-26T21:55:28 | 2018-05-26T21:55:28 | 131,428,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,430 | py | #!/usr/bin/python
# To set this script up, follow instructions at https://developers.google.com/calendar/quickstart/python
# Also, you might need to run: pip3 install --upgrade google-api-python-client
from __future__ import print_function
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from configparser import ConfigParser
import datetime
import sys
import pytz
import re
def addColonToTimeZone(datetime):
    """Insert a colon into the trailing UTC offset of a timestamp string,
    e.g. '2018-01-01T08:00:00-0600' -> '2018-01-01T08:00:00-06:00'.

    Google Calendar expects RFC3339 timestamps with colon-separated offsets,
    while strftime('%z') emits '+HHMM'. Note: the parameter shadows the
    stdlib ``datetime`` module (name kept for backward compatibility); it is
    a plain string, not a datetime object.
    """
    # Anchor on a real '+HHMM'/'-HHMM' offset at the end of the string. The
    # previous greedy pattern r'(.*)(00)' matched the *last* '00' anywhere,
    # which corrupted timestamps whose offset does not end in '00' (e.g.
    # '-0530' got the colon inserted into the seconds field instead).
    return re.sub(r'([+-]\d{2})(\d{2})$', r'\1:\2', datetime)
def removeColonFromTimeZone(datetime):
    """Strip the colon from the trailing UTC offset of a timestamp string,
    e.g. '2018-01-01T08:00:00-06:00' -> '2018-01-01T08:00:00-0600', so it can
    be parsed with strptime's '%z' directive.

    Note: the parameter shadows the stdlib ``datetime`` module (name kept for
    backward compatibility); it is a plain string, not a datetime object.
    """
    # Anchor on the '+HH:MM'/'-HH:MM' offset at the end of the string. The
    # previous greedy pattern r'(.*):(00)' required the offset minutes to be
    # literally '00' and otherwise deleted the seconds colon instead (e.g.
    # on '...T08:30:00+05:30').
    return re.sub(r'([+-]\d{2}):(\d{2})$', r'\1\2', datetime)
def computeStartTime(startDate):
    """RFC3339-style timestamp for midnight US/Central on *startDate*.

    :param startDate: date string in 'mm/dd/YYYY' form.
    :return: e.g. '2018-01-01T00:00:00-06:00'.
    """
    central = pytz.timezone("US/Central")
    naive = datetime.datetime.strptime(startDate, '%m/%d/%Y')
    # is_dst=None makes pytz raise on ambiguous/nonexistent DST times
    # instead of guessing silently.
    aware = central.localize(naive, is_dst=None)
    return addColonToTimeZone(aware.strftime('%Y-%m-%dT%H:%M:%S%z'))
def getStartOfDayDateTime(date):
    """Timezone-aware datetime for 00:00:00 on *date* in US/Central."""
    central = pytz.timezone("US/Central")
    midnight = datetime.datetime.combine(date, datetime.time.min)
    return central.localize(midnight, is_dst=None)
def getEndOfDayDateTime(date):
    """Timezone-aware datetime for 23:59:59.999999 on *date* in US/Central."""
    central = pytz.timezone("US/Central")
    last_instant = datetime.datetime.combine(date, datetime.time.max)
    return central.localize(last_instant, is_dst=None)
def computeStopTime(startDate, numberOfDays):
    """RFC3339-style timestamp (numberOfDays + 1) days after *startDate*,
    at midnight US/Central — an exclusive upper bound for the query window.

    :param startDate: date string in 'mm/dd/YYYY' form.
    :param numberOfDays: length of the window in days.
    """
    central = pytz.timezone("US/Central")
    first_day = datetime.datetime.strptime(startDate, '%m/%d/%Y')
    stop_day = first_day + datetime.timedelta(days=numberOfDays + 1)
    stamped = central.localize(stop_day, is_dst=None).strftime('%Y-%m-%dT%H:%M:%S%z')
    return addColonToTimeZone(stamped)
def calculateHoursForDay(date, daysActions):
    """Total hours spent at work on *date*, from enter/exit geofence events.

    :param date: datetime.date of the day being summed.
    :param daysActions: time-ordered list of dicts with keys 'timestamp'
        (timezone-aware datetime) and 'action' ('You entered work' /
        'You exited work'). Assumed sorted ascending — TODO confirm caller
        guarantees ordering.
    :return: 0 (int) for an empty list, otherwise hours rounded to 2 decimals.
    """
    minutes = 0
    startOfDay = getStartOfDayDateTime(date)
    startWorkTime = None
    if len(daysActions) == 0:
        return minutes
    # check if first action is a stop
    # (an exit with no preceding enter means work started before midnight,
    # so count from the start of the day up to the exit)
    if daysActions[0]['action'] == 'You exited work':
        minutes += (daysActions[0]['timestamp'] - startOfDay) / datetime.timedelta(minutes=1)
    # Sum each matched enter→exit interval; an exit without a pending enter
    # is ignored here (the first-action case above already handled it).
    for a in daysActions:
        if a['action'] == 'You entered work':
            startWorkTime = a['timestamp']
        elif a['action'] == 'You exited work' and startWorkTime != None:
            minutes += (a['timestamp'] - startWorkTime) / datetime.timedelta(minutes=1)
            startWorkTime = None
    # check if last action is a start
    # (an enter with no following exit means work ran past midnight,
    # so count up to the end of the day)
    if daysActions[-1]['action'] == 'You entered work':
        endOfDay = getEndOfDayDateTime(date)
        minutes += (endOfDay - daysActions[-1]['timestamp']) / datetime.timedelta(minutes=1)
    return round(minutes / 60, 2)
def calculateHours(events):
    """Group calendar events by calendar day and compute hours worked per day.

    :param events: Google Calendar event dicts with 'start'->'dateTime'
        (RFC3339 string) and 'summary' (enter/exit action text).
    :return: dict mapping datetime.date to
        {'summaries': [{'timestamp': ..., 'action': ...}, ...], 'hours': float}.
    """
    daysDict = {}
    # Bucket each event under its (timezone-aware) calendar date.
    for event in events:
        raw_stamp = event['start'].get('dateTime')
        parsed = datetime.datetime.strptime(
            removeColonFromTimeZone(raw_stamp), '%Y-%m-%dT%H:%M:%S%z')
        bucket = daysDict.setdefault(parsed.date(), {'summaries': []})
        bucket['summaries'].append({'timestamp': parsed,
                                    'action': event['summary']})
    # Attach the per-day total to each bucket.
    for day, bucket in daysDict.items():
        bucket['hours'] = calculateHoursForDay(day, bucket['summaries'])
    return daysDict
def main():
    """CLI entry point: read a start date (and optional day count) from argv,
    fetch the matching Google Calendar events, and print hours worked per day
    plus the total.

    argv[1]: start date 'mm/dd/YYYY' (required).
    argv[2]: number of days to cover (optional, default 7).
    Raises RuntimeError when no start date is given.
    """
    numberOfDays = 7
    if len(sys.argv) < 2:
        raise RuntimeError(
            f'Incorrect number of arguments: {len(sys.argv)}. Expected at least one (mm/dd/YYYY). A second optional arg for number of days (default is 7).')
    else:
        startDate = sys.argv[1]
    if (len(sys.argv) > 2):
        numberOfDays = int(sys.argv[2])
    # Inclusive lower / exclusive upper bound of the query window (RFC3339).
    startTime = computeStartTime(startDate)
    stopTime = computeStopTime(startDate, numberOfDays)
    # Setup the Calendar API
    # calendar.ini supplies the calendar id and the OAuth redirect/scope url.
    config = ConfigParser()
    config.read('calendar.ini')
    calendar_id = config.get('google_calendar', 'calendar_id')
    url = config.get('google_calendar', 'url')
    # Cached OAuth credentials; run the browser flow only when missing/expired.
    store = file.Storage('credentials.json')
    creds = store.get()
    if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets('client_secrets.json', url)
        creds = tools.run_flow(flow, store)
    service = build('calendar', 'v3', http=creds.authorize(Http()))
    # Call the Calendar API
    print('Getting work events')
    events_result = service.events().list(calendarId=calendar_id,
                                          timeMin=startTime,
                                          timeMax=stopTime,
                                          singleEvents=True,
                                          orderBy='startTime').execute()
    events = events_result.get('items', [])
    # for event in events:
    #     start = event['start'].get('dateTime')
    #     print(start, event['summary'])
    days = calculateHours(events)
    print()
    print(f'Date Hours Worked\n---------------------------')
    totalHours = 0
    for key, value in days.items():
        totalHours += value["hours"]
        print(f'{key} {value["hours"]}')
    print()
    print(f'Total Hours: {round(totalHours, 2)}')
"rborgwald2885@gmail.com"
] | rborgwald2885@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.