blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f5f92d828ee62431a444c5e91a5c8221d3cfc4ba | 429c3b272030641fd62652d052e21e7814fbe17e | /setup.py | 8429a728e869b979f6dd953863f4168141d861d0 | [
"MIT"
] | permissive | williamd1618/ksql-python | 774911ad913557e39cd8ebd04e1c17a0c86686d4 | 42b6f684597716cdaf69b42caa718e2a32b36603 | refs/heads/master | 2020-03-11T04:04:17.039767 | 2018-01-15T07:03:21 | 2018-01-15T07:03:21 | 129,767,462 | 0 | 0 | null | 2018-04-16T15:43:02 | 2018-04-16T15:43:02 | null | UTF-8 | Python | false | false | 1,508 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Setup module """
import os
import re
from setuptools import setup
from pip.req import parse_requirements
# Get version from __init__.py file
# NOTE(review): the comment above predates this code — the version is
# actually hard-coded here, not read from __init__.py.
VERSION = "0.3.0.2.22"
here = os.path.dirname(__file__)
# Long description shown on PyPI, read from the project README.
README = open(os.path.join(os.path.dirname(__file__), "README.rst")).read()
# Runtime dependencies passed to install_requires below.
reqs = ['requests',
        'six',
        'urllib3']
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Package metadata and build configuration for the ksql distribution.
setup(
    name="ksql",
    version=VERSION,
    description="A Python wrapper for the KSql REST API",
    long_description=README,  # rendered from README.rst on PyPI
    author="Bryan Yang @ Vpon",
    author_email="kenshin200528@gmail.com",
    url="https://github.com/bryanyang0528/ksql-python",
    license="MIT License",
    packages=[
        "ksql"
    ],
    include_package_data=True,
    platforms=['any'],
    install_requires=reqs,  # runtime dependencies declared above
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
)
| [
"kenshin2004528@gmail.com"
] | kenshin2004528@gmail.com |
889cc8fe5b495309708d35c56ef72d7f8db8646e | 5045030c79a0956415bfe57a55e01593fc310214 | /blogproject/accounts/views.py | 837c84f28df473174da34f2a244c328386499e71 | [] | no_license | Werfit/WDjangoReactBlog | 92b49b9bb74005c63a66badd969bbff8b886a81f | 6447a69edfb3c7afcf1d94b14226976091deb5fd | refs/heads/master | 2023-03-07T10:23:25.949406 | 2021-02-20T10:54:52 | 2021-02-20T10:54:52 | 340,630,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,856 | py | from rest_framework import generics, permissions, status
from rest_framework.response import Response
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from knox.models import AuthToken
from .serializers import *
from .models import Profile
class RegisterAPI(generics.GenericAPIView):
    """Create a new user account and return it together with a knox token."""
    serializer_class = RegisterSerializer
    def post(self, request, *args, **kwargs):
        """Validate the registration payload, create the user, issue a token.

        Raises a DRF ValidationError (HTTP 400) when the payload is invalid.
        """
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        return Response({
            # Serialize the created User instance (not serializer.data) so
            # the response shape matches LoginAPI and never echoes raw input.
            "user": UserSerializer(user, context=self.get_serializer_context()).data,
            # AuthToken.objects.create() returns (instance, token_string).
            "token": AuthToken.objects.create(user)[1]
        })
class LoginAPI(generics.GenericAPIView):
    """Authenticate an existing user and hand back a fresh knox token."""
    serializer_class = LoginSerializer
    def post(self, request, *args, **kwargs):
        """Validate credentials and respond with the user plus a new token."""
        login_serializer = self.get_serializer(data=request.data)
        login_serializer.is_valid(raise_exception=True)
        user = login_serializer.validated_data
        user_payload = UserSerializer(
            user, context=self.get_serializer_context()
        ).data
        # AuthToken.objects.create() yields (instance, token_string).
        _, token = AuthToken.objects.create(user)
        return Response({"user": user_payload, "token": token})
class UserAPI(generics.RetrieveAPIView):
    # Only authenticated callers may retrieve account details.
    permission_classes = (permissions.IsAuthenticated, )
    serializer_class = UserSerializer
    def get_object(self):
        """Return the requesting user, so GET always serves the caller's own record."""
        return self.request.user
class ProfileAPI(generics.RetrieveAPIView):
    """Publicly readable profile lookup keyed by the owner's user id."""
    permission_classes = (permissions.AllowAny, )
    serializer_class = ProfileSerializer
    def get(self, request, *args, **kwargs):
        """Fetch the profile whose owner id matches the ``pk`` URL kwarg.

        Responds 404 when no such profile (or owner) exists.
        """
        try:
            profile = Profile.objects.get(owner__id=self.kwargs['pk'])
            payload = {
                "profile": ProfileSerializer(profile).data,
                "username": profile.owner.username,
            }
            return Response(payload)
        except ObjectDoesNotExist:
            return Response(
                {"detail": "Profile is not found"},
                status=status.HTTP_404_NOT_FOUND,
            )
| [
"werfit@Slaviks-MacBook-Pro.local"
] | werfit@Slaviks-MacBook-Pro.local |
cc175ac74f032d57d8641a106ebead8e8f7f8a10 | 7c9707f0f1cb8e633ac605934f3dbd8036790868 | /projet/rpi_manager/migrations/0002_ph.py | 71da2a49b682367bb47761ea2e6341addf2a5fc5 | [] | no_license | ometeore/hydropo | 891e1abd4c1b8ccd0a3b27a043abf894b70ceb5b | 324076d4b7ddbd14e718c424eb24d129c2a2243c | refs/heads/master | 2023-06-14T08:35:55.838469 | 2021-07-04T16:28:09 | 2021-07-04T16:28:09 | 290,198,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | # Generated by Django 3.1 on 2020-08-25 13:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Ph model: a timestamped pH reading linked to an Rpi."""
    dependencies = [
        # Requires the initial migration that created rpi_manager.rpi.
        ("rpi_manager", "0001_initial"),
    ]
    operations = [
        migrations.CreateModel(
            name="Ph",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # Timestamp of the measurement.
                ("date", models.DateTimeField()),
                # Measured pH value.
                ("value", models.FloatField()),
                (
                    "rpi",
                    # Readings are deleted together with their Rpi.
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="rpi_manager.rpi",
                    ),
                ),
            ],
        ),
    ]
| [
"pilt64@hotmail.fr"
] | pilt64@hotmail.fr |
a58d31267abb259207c5a6ac44d0e161b10ae7b2 | 37da840bfc8e8299e6415a4816d6d6b72e1e1807 | /fizika.py | bbed51ce22cd4641ad3bd3edee3325def85c4f1f | [] | no_license | stasextaz/chikibambminecraft | 523e06d139a63b0cd5a222f0ab8ff298b37d18ba | 7c23be155797993a3a1e1015d13996082a3d7577 | refs/heads/master | 2020-09-22T08:39:32.317675 | 2019-12-24T15:12:02 | 2019-12-24T15:12:02 | 225,124,392 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | fizika = [2,3,2,2,3,3,3]
# NOTE(review): this count result is discarded — the statement has no effect.
fizika.count(2)
def fizika1():
    """Mutate the global ``fizika`` list in place and print the result.

    Removes every 2, then rewrites each 3 as 4, then each 4 as 5, so the
    net effect is: drop the 2s and turn the 3s into 5s (positions kept).
    """
    # Strip all occurrences of 2.
    while 2 in fizika:
        fizika.remove(2)
    # Replace each 3 with 4, preserving its position.
    for pos, value in enumerate(fizika):
        if value == 3:
            fizika[pos] = 4
    # Replace each 4 (including freshly converted ones) with 5.
    for pos, value in enumerate(fizika):
        if value == 4:
            fizika[pos] = 5
    print(fizika)
fizika1()
| [
"noreply@github.com"
] | noreply@github.com |
a602a6bd91d133db3c68be3c4b4d25e48dc8bb36 | c02b68e0e1962cab85e9ab882080c68f594d764b | /src/unifide_backend/action/brand/__init__.py | 382b99103ef9c0480ec78fe4dd017daeeb81b733 | [] | no_license | nubela/unifide-backend | b892fab58193488560c852f784ee05f2b656df01 | 0be338be06380941b43601e29326b195e6bc8778 | refs/heads/master | 2021-01-10T20:23:00.291958 | 2013-10-16T15:56:06 | 2013-10-16T15:56:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | from unifide_backend.action.brand.action import * | [
"nubela@gmail.com"
] | nubela@gmail.com |
142bd6e7f8a71debb064f1499a40085ef6517cf7 | 524cf264b53a353217a0dd7127889db0ec735bec | /pytest/pytest_simplesecclient.py | 78a82dc0430d53e86c31288580f35cc44fea395e | [] | no_license | ChengHsu/NetworkSecurity | 575b993bc11f0ec781a13ff4a1374d7203cb863c | a9ba3ca32794bd55bf1058593aea33d7850850ff | refs/heads/master | 2020-03-28T02:22:46.080800 | 2018-12-30T05:20:48 | 2018-12-30T05:20:48 | 147,566,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | import asyncio
import time
import sys
import os
def findfile_num(start, name):
    """Count files under *start* whose basename starts with *name*.

    Returns that count plus one, i.e. the next free sequence number for a
    new file with the given prefix.
    """
    matches = sum(
        1
        for _dirpath, _subdirs, filenames in os.walk(start)
        for filename in filenames
        if filename.startswith(name)
    )
    return matches + 1
async def tcp_echo_client(plaintxt, loop):
    """Round-trip *plaintxt* through the security server on 127.0.0.1.

    Sends 'encrypt,<plaintext>', extracts the ciphertext from the reply,
    sends 'decrypt,<ciphertext>', then records the plaintext/ciphertext
    pair to a numbered security_response_<N>.txt file in the current
    directory. The server port is taken from sys.argv[1].
    """
    reader, writer = await asyncio.open_connection('127.0.0.1',int(sys.argv[1]),loop=loop)
    print('Send encrpt,%s' % plaintxt)
    cmd1 = 'encrypt,%s' % plaintxt
    writer.write(cmd1.encode())
    data1 = await reader.read(1024)
    # NOTE(review): loop.stop() does not return from this coroutine — the
    # lines below still execute (and would fail on empty data1); confirm.
    if not data1 or data1.decode() == "EXIT":
        loop.stop()
    response1 = data1.decode('utf-8')
    print('Received: %r' % response1)
    # Reply is assumed to look like '<label>,<ciphertext>' — TODO confirm.
    ciphertxt = response1.split(',')[1]
    print('Send decrpt,%s' % ciphertxt)
    cmd2 = 'decrypt,%s' % ciphertxt
    writer.write(cmd2.encode('utf-8'))
    data2 = await reader.read(1024)
    if not data2 or data2.decode() == "EXIT":
        loop.stop()
    response2 = data2.decode()
    print('Received: %r' % response2)
    # Next free sequence number for the transcript file.
    count = findfile_num(os.getcwd(), 'security_response_')
    f = open('security_response_%s.txt'%count, 'w')
    f.write('timestamp: %s' % str(time.time()))
    f.write('\n')
    f.write('P: %s' % plaintxt)
    f.write('\n')
    f.write('C: %s' % ciphertxt)
    f.close()
    print('Close the socket')
    writer.close()
# Drive one encrypt/decrypt exchange; argv[2] carries the plaintext.
loop = asyncio.get_event_loop()
plaintxt = sys.argv[2]
loop.run_until_complete(tcp_echo_client(plaintxt, loop))
loop.close()
| [
"chenghsu0224@gmail.com"
] | chenghsu0224@gmail.com |
ee1b03e83ea540328e12e93a69490d8e49e7779a | 8b8ef5fba1dfba0b8bd9fb3d724e69048bbc0f13 | /models/student.py | 8458b4855f8adc10b1a60d043175ec77eb4e7319 | [] | no_license | zbaylin/BookWorm | 5e66331870cfaac541aa157a3568cb4b6bddc49a | 6eda4d7e8c5c4aff779dcfcab2eee7126a9cad99 | refs/heads/master | 2020-04-05T10:26:32.318741 | 2019-02-13T01:17:22 | 2019-02-13T01:17:22 | 156,798,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | import json
import util.dict
class Student():
    """In-memory record for a student, optionally hydrated from JSON."""
    def __init__(
        self,
        firstname,
        lastname,
        email,
        grade,
        ID=None,
        password=None,
        num_issuances=0,
    ):
        """Store the given fields; ID and password may be absent for new students."""
        self.firstname = firstname
        self.lastname = lastname
        self.email = email
        self.grade = grade
        # Stored lowercase even though the parameter is spelled ID.
        self.id = ID
        self.password = password
        # Count of issuances associated with this student.
        self.num_issuances = num_issuances
    @classmethod
    def from_json(cls, json_map):
        """Build a Student from a dict or a JSON string.

        Optional keys ("id", "password", "issuances") are read through
        util.dict.safe_get — presumably returning None when a key is
        missing; verify against util.dict.
        """
        if type(json_map) is str:
            json_map = json.loads(json_map)
        issuances = util.dict.safe_get(json_map, "issuances")
        return cls(
            json_map["firstname"],
            json_map["lastname"],
            json_map["email"],
            json_map["grade"],
            util.dict.safe_get(json_map, "id"),
            util.dict.safe_get(json_map, "password"),
            # Only the number of issuances is kept, not the entries.
            len(issuances) if issuances else 0
) | [
"zbaylin@gmail.com"
] | zbaylin@gmail.com |
fcea52caf6cd8c18bd6aa88e38b67ee777906660 | dedf3082c0026d52200361a34262c752c05a193b | /Hackerrank-Solutions/Hackerrank-Python-Solutions/Regex and Parsing/Regex Substitution.py | 65949f65eea2f201a40ee97f5307fb0b3baa06f4 | [
"MIT"
] | permissive | HetDaftary/Competitive-Coding-Solutions | 0d31568ab5be7292d28883704f15e62a2496f637 | a683fa11895410c6eef07b1a68054f3e90aa596b | refs/heads/main | 2023-04-02T11:58:00.731977 | 2021-03-31T14:23:39 | 2021-03-31T14:23:39 | 351,979,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
import re
N = int(input())
for i in range(N):
print(re.sub(r'(?<= )(&&|\|\|)(?= )', lambda x: 'and' if x.group() == '&&' else 'or', input()))
| [
"hetdaftary@gmail.com"
] | hetdaftary@gmail.com |
423824d04b9ff1a989d3a18f132c057b03f82f22 | 4554f8d3ab1a6267b17dad2b4d2c47b0abe8d746 | /benchmarking/remote/devices.py | c7cacd80d2eb8e3a4eb17eebb98a6ac45237cf32 | [
"Apache-2.0"
] | permissive | jteller/FAI-PEP | 44fead3ca26f4844067d455c86ac8c5bfaf79a14 | 73b8a08815675135e9da7d68375d1218cbd04eaa | refs/heads/master | 2020-04-29T06:04:19.197966 | 2019-03-15T23:32:54 | 2019-03-15T23:32:54 | 175,904,011 | 0 | 0 | Apache-2.0 | 2019-03-15T23:30:04 | 2019-03-15T23:30:04 | null | UTF-8 | Python | false | false | 2,484 | py | #!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
from utils.devices import devices as devices_dict
class Devices(object):
    """Device registry that resolves abbreviations to full device records."""

    def __init__(self, filename=None):
        """Load device definitions from *filename* (JSON) or the built-in table."""
        if filename:
            # An explicit file wins over the bundled device dictionary.
            assert os.path.isfile(filename), \
                "Device file {} does not exist".format(filename)
            with open(filename, "r") as f:
                self.devices = json.load(f)
        else:
            self.devices = devices_dict
        self._elaborateDevices()

    def getFullNames(self, devices):
        """Map a comma-separated list of names/abbreviations to full names.

        Unknown tokens pass through unchanged.
        """
        resolved = []
        for token in devices.split(","):
            if token in self.devices:
                resolved.append(self.devices[token]["name"])
            else:
                resolved.append(token)
        return ",".join(resolved)

    def getAbbrs(self, abbr):
        """Return the abbreviation list of *abbr*'s device, or None."""
        if abbr not in self.devices:
            return None
        entry = self.devices[abbr]
        return entry["abbr"] if "abbr" in entry else None

    def _elaborateDevices(self):
        """Register every abbreviation as an alias key in ``self.devices``."""
        pending = []
        for name, device in self.devices.items():
            assert "name" in device, \
                "Field name is required in devices"
            assert device["name"] == name, \
                "Device key ({}) and name ({})".format(name, device["name"]) + \
                " do not match"
            if "abbr" in device:
                assert isinstance(device["abbr"], list), \
                    "Abbreviations for {} needs to be a list".format(name)
                pending.extend((device, abbr) for abbr in device["abbr"])
        # Insert aliases after the scan so iteration above is not disturbed.
        for device, abbr in pending:
            self._elaborateOneDevice(device, abbr)

    def _elaborateOneDevice(self, device, abbr):
        """Insert one abbreviation alias, refusing duplicates."""
        assert abbr not in self.devices, "Abbreviation " + \
            "{} is already specified in the device list".format(abbr)
        self.devices[abbr] = device
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
2b733cc49a3d904c59a1710110ebf794cacf8a94 | 17de3571cfbd99b7541d15c55231575eb635260b | /txt2rpy.py | a2bdb80db9e5c7a1a3b43929297c77e8262793ae | [
"BSD-2-Clause"
] | permissive | sinsong/literal-script-compiler | f56f64afd8806552f08da4d72bca9e1f1f6e3eff | bc16e66581336d7e448461532490876a6ff2a865 | refs/heads/master | 2020-06-02T10:49:40.086612 | 2019-06-10T09:39:40 | 2019-06-10T09:39:40 | 191,131,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | import sys
import re
from core.row_file import row_file_open
from core.lparser import lparse
from core.llparser import llparse
from core.parser import parse
from core.codegen import codegen
from core.context import Context
# raw_content -> raw_line
# raw_line -> logic_lines
# logic_lines -> ir
# ir -> code_gen
def main():
    """Run the literal-script compile pipeline on the file named in argv[1].

    Silently does nothing when no input file argument was given.
    """
    if len(sys.argv) < 2:
        return
    source_path = sys.argv[1]
    ctx = Context()
    # raw content -> raw lines -> logic lines, accumulated into the context.
    llparse(ctx, lparse(row_file_open(source_path)))
    parse(ctx)
    codegen(ctx)
if __name__ == '__main__':
main() | [
"literalkernel@outlook.com"
] | literalkernel@outlook.com |
898a057527760f01aeb95b618322cf09388c1f42 | 02e23da0431623db86c8138bda350a1d526d4185 | /Archivos Python Documentos/Graficas/.history/TRABAJO_SPT_v3_20200224230649.py | 0fe0123926d8f3bed47ddd88db91bc709c442b12 | [] | no_license | Jaamunozr/Archivos-python | d9996d3d10ff8429cd1b4c2b396016a3a5482889 | 1f0af9ba08f12ac27e111fcceed49bbcf3b39657 | refs/heads/master | 2022-08-05T14:49:45.178561 | 2022-07-13T13:44:39 | 2022-07-13T13:44:39 | 244,073,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,848 | py | import os
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
# -----------------------------------------------------------------------------
# Setup: clear the console, create the base 3D figure and math aliases, and
# define the (x, y) point where the single-point potential is evaluated.
# -----------------------------------------------------------------------------
os.system("clear")
fig = pl.figure()
axx = Axes3D(fig)
raiz=np.sqrt  # alias: square root
ln=np.log  # alias: natural logarithm
puntoX=float(0)  # X coordinate of the probe point
puntoY=float(0)  # Y coordinate of the probe point
#puntoX=float(input("Seleccione la coordenada en X donde desea calcular el potencial: "))
#puntoY=float(input("Seleccione la coordenada en Y donde desea calcular el potencial: "))
print("Calculando ...")
# -----------------------------------------------------------------------------
# Physical and plotting parameters.
# -----------------------------------------------------------------------------
Xa = np.arange(-10, 10, 0.1) # X coordinate range
Ya = np.arange(-10, 10, 0.1) # Y coordinate range
l = 2 # Electrode length [m]
rho= 100 # Soil resistivity (original comment gives [Ohm/m])
Ik=200 # Total fault current [A]
Rad=0.01 # Electrode radius [m]
Electrodos=8 # Number of electrodes
Pos1=4 # Y position 1 for the 2D plot analysis
Pos2=0 # Y position 2 for the 2D plot analysis
# -----------------------------------------------------------------------------
# Electrode positions (x, y).
# -----------------------------------------------------------------------------
P=np.array([
[-4,-4], # Electrode A
[0,-4], # Electrode B
[4,-4], # Electrode C
[-4,0], # Electrode D
[4,0], # Electrode E
[-4,4], # Electrode F
[0,4], # Electrode G
[4,4] # Electrode H
])
# -----------------------------------------------------------------------------
# Derived quantities and work arrays.
# -----------------------------------------------------------------------------
E=Electrodos-1  # index of the last electrode
ik=Ik/Electrodos  # current injected per electrode [A] (evenly split)
Vt=np.zeros((np.count_nonzero(Xa),np.count_nonzero(Ya)))  # raw grid sums
m=np.zeros((Electrodos,1))  # point->electrode distances (grid sweep)
V=np.zeros((Electrodos,1))  # per-electrode log terms (grid sweep)
k=0
m2=np.zeros((Electrodos,1))  # point->electrode distances (single point)
V2=np.zeros((Electrodos,1))  # per-electrode log terms (single point)
# -----------------------------------------------------------------------------
# Potential at the requested single point (puntoX, puntoY). Each electrode
# contributes rho*ik/(2*pi*l) * ln((l + sqrt(r^2 + l^2)) / r).
# -----------------------------------------------------------------------------
i=0
while i<=E:
    m2[i][0] =round(raiz((((P[i][0])-puntoX)**2)+(((P[i][1])-puntoY)**2)),4)
    o,u=((P[i][0])-puntoX),((P[i][1])-puntoY)
    if ((o ==0) and (u==0)) or (m2[i][0]==0):
        # On top of an electrode: clamp the distance to the rod radius to
        # avoid a division-by-zero / log singularity.
        m2[i][0]=Rad
    V2[i][0] =ln((l+raiz((m2[i][0])**2+l**2))/(m2[i][0]))
    i += 1
Vt2=(np.sum(V2)*(rho*ik))/(2*np.pi*l)
print("El potencial en el punto (",puntoX,",",puntoY,"), es de",round(Vt2,3),"[V]")
# -----------------------------------------------------------------------------
# Grid (mesh) computation over the whole Xa x Ya area.
# -----------------------------------------------------------------------------
Vxy = [1] * (np.count_nonzero(Ya))  # potentials along the diagonal y == x
while k<np.count_nonzero(Ya):
    Y=round(Ya[k],3)
    t=0
    while t<np.count_nonzero(Xa):
        X=round(Xa[t],3)
        i=0
        # Sum the log-term contribution of every electrode at (X, Y).
        while i<=E:
            m[i][0] =round(raiz((((P[i][0])-X)**2)+(((P[i][1])-Y)**2)),4)
            o,u=((P[i][0])-X),((P[i][1])-Y)
            if ((o ==0) and (u==0)) or (m[i][0]==0):
                # Clamp to the rod radius when sitting on an electrode.
                m[i][0]=Rad
            V[i][0] =ln((l+raiz((m[i][0])**2+l**2))/(m[i][0]))
            i += 1
        Vt[k][t]=np.sum(V)
        if Y==Pos1:
            Vxa=Vt[k]  # raw profile along y == Pos1
        if Y==Pos2:
            Vxb=Vt[k]  # raw profile along y == Pos2
        if (Y==X) and ((X-Y)==0):
            Vxy[k]=Vt[k][t]*(rho*ik)/(2*np.pi*l)  # diagonal profile, scaled
        t +=1
    k +=1
# Scale the raw log sums into potentials [V].
Vtt=(Vt*(rho*ik))/(2*np.pi*l)
Vxa=(Vxa*(rho*ik))/(2*np.pi*l)
Vxb=(Vxb*(rho*ik))/(2*np.pi*l)
aa=np.where(np.amax(Vtt) == Vtt)  # grid indices of the maximum (GPR)
print ("Valor máximo de tensión (GPR):",round(Vtt[::].max(),3),"[V], en posición: (",round(Xa[aa[0][0]],2),",",round(Ya[aa[1][0]],2),")")
bb=np.where(np.amin(Vtt) == Vtt)  # indices of the minimum (only used by the commented print)
print("Valor de Resistencia de puesta a tierra:", (round(Vtt[::].max(),3)/Ik), "[Ohm]")
#print ("Valor mínimo de tensión:",round(Vtt[::].min(),3),"[V], en posición: (",round(Xa[bb[0][0]],2),",",round(Ya[bb[1][0]],2),")")
print ("Número de elementos de Vt:",np.count_nonzero(Vtt))
# -----------------------------------------------------------------------------
# 3D surface plot of the potential.
# -----------------------------------------------------------------------------
# (earlier figure experiments kept for reference)
#fig = plt.figure(figsize=plt.figaspect(0.2))
#fig = plt.figure(4,figsize=(6,4.5)) #(Ancho, alto)
#fig.suptitle('Potencial')
fig = plt.figure(figsize=plt.figaspect(2.))
fig.suptitle('A tale of 2 subplots')
# First panel: potential surface over the grid.
ax = fig.add_subplot(2, 2, 1, projection='3d')
X, Y = np.meshgrid(Xa, Ya)
#surf = ax.plot_surface(X, Y, Vtt, cmap = cm.get_cmap("jet"))#, antialiased=False)
surf = ax.plot_surface(X, Y, Vtt, rstride=1, cstride=1,
linewidth=0, antialiased=False)
ax.set_zlim(300, 1800)
#fig.colorbar(surf)
# -----------------------------------------------------------------------------
# 2D profile plots (panels 2-4): y == Pos1, y == Pos2, and the diagonal.
# -----------------------------------------------------------------------------
x1=Xa
ax = fig.add_subplot(2, 2, 2)
ax.plot(x1, Vxa, color="blue", linewidth=1.0, linestyle="-")
ax.title.set_text('Eje X1 vs V')
ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
ax.set_ylabel('Grafica 1')
ax = fig.add_subplot(2, 2, 3)
ax.plot(x1, Vxb, color="red", linewidth=1.0, linestyle="-")
ax.title.set_text('Eje X2 vs V')
ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
ax.set_ylabel('Grafica 2')
ax = fig.add_subplot(2, 2, 4)
ax.plot(x1, Vxy, color="green", linewidth=1.0, linestyle="-")
ax.title.set_text('Eje X,Y vs V')
ax.grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
ax.set_ylabel('Grafica 3')
plt.pause(25)  # display the figure for 25 seconds before saving
pl.savefig('tierras.pdf')
| [
"jaamunozr@gmail.com"
] | jaamunozr@gmail.com |
9378b8553218a33230ad97549dba2ea78a1f7bf6 | b8e0147172dd54e5f9c6b7f6ebd55d93155e029b | /server.py | a712fb527014e321da17b178823a68af35bb90c1 | [] | no_license | thaiantt/Gdelt | 5d8491438a5f54600caf8085f44ced1b969cb0c8 | 776f0352a15887f51849fbef7ef4e522d17cc47a | refs/heads/master | 2021-05-11T02:52:21.500185 | 2018-01-23T07:33:50 | 2018-01-23T07:33:50 | 117,896,772 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,019 | py | # coding: utf-8
from twisted.web.static import File
from twisted.web.server import Site
from autobahn.twisted.websocket import WebSocketServerProtocol
import json
from functions_api import *
from pymongo import MongoClient
# API
# Dispatch table: maps the websocket request's 'fct' field to the query
# function imported from functions_api. Every handler is invoked as
# handler(args, database) by handle_msg.
api = {'getAllHumanitarianEventsByRegionByYear': getAllHumanitarianEventsByRegionByYear,
       'getDifferentEventsByRegionByYear': getDifferentEventsByRegionByYear,
       'getDifferentEventsByRegionByMonthByYear': getDifferentEventsByRegionByMonthByYear,
       'getAllHumanitarianEventsByRegionByMonthByYear': getAllHumanitarianEventsByRegionByMonthByYear,
       'getCountDifferentEventsByCountryCodeByMonthByYear': getCountDifferentEventsByCountryCodeByMonthByYear,
       'getEventByCountryCodeByStartByEnd': getEventByCountryCodeByStartByEnd,
       'getCountDifferentEventsByCountryCodeByStartByEnd': getCountDifferentEventsByCountryCodeByStartByEnd,
       'getEventsByBrushByStartByEnd': getEventsByBrushByStartByEnd,
       'getCountAllByStartByEnd': getCountAllByStartByEnd,
       'getLinksByRegionByStartByEnd': getLinksByRegionByStartByEnd,
       'getUndirectedLinksByRegionByStartByEnd': getUndirectedLinksByRegionByStartByEnd}
class MyServerProtocol(WebSocketServerProtocol):
    """Websocket endpoint that forwards client requests to the API layer."""

    def onConnect(self, request):
        """Log the peer opening a connection."""
        print("Client connecting: {}".format(request.peer))

    def onOpen(self):
        """Log that the websocket handshake completed."""
        print("WebSocket connection open.")

    def onMessage(self, payload, isBinary):
        """Answer text frames via handle_msg; binary frames are only logged."""
        if isBinary:
            print("Binary message received: {} bytes".format(len(payload)))
            return
        reply = handle_msg(payload)
        if reply:
            self.sendMessage(reply.encode('utf8'), False)

    def onClose(self, wasClean, code, reason):
        """Log why the connection went away."""
        print("WebSocket connection closed: {}".format(reason))
def handle_msg(msg):
    """Decode a JSON request frame, run the named API function, and return
    the JSON-encoded response string.
    """
    request = json.loads(msg.decode('utf8'))
    print("Text message received")
    print("Request : " + request['fct'])
    fct_name = request['fct']
    fct_args = request["args"]
    # Dispatch through the module-level api table; database is the shared
    # MongoDB handle created in __main__.
    result = api[fct_name](fct_args, database)
    return json.dumps({'data': result,
                       'args': fct_args,
                       'fct': fct_name})
if __name__ == '__main__':
    import sys
    # Static file server serving the current directory as web root.
    root = File(".")
    from twisted.python import log
    from twisted.internet import reactor
    log.startLogging(sys.stdout)
    from autobahn.twisted.websocket import WebSocketServerFactory
    # create a MongoClient to the running mongod instance
    client = MongoClient('localhost', 27017)
    # getting a Database (module-level so handle_msg can reach it)
    database = client.test
    # create indexes before serving queries
    create_indexes(database)
    # Websocket API endpoint on port 9000.
    factory = WebSocketServerFactory()
    factory.protocol = MyServerProtocol
    reactor.listenTCP(9000, factory)
    # Static HTTP server on port 8080.
    site = Site(root)
    reactor.listenTCP(8080, site)
    reactor.run()
| [
"thaian.tt@gmail.com"
] | thaian.tt@gmail.com |
12187cdb11c155892257555af0a9c93d9dc2d100 | b4b1459ecb0d9190b11e530ffc374153af5c349e | /filters.py | c4c0b7f9216ed70b0c1d2b591f456e8d5ab9c01c | [] | no_license | perintyler/nfl_tracker | 77aaa9a1d22a358e664da68890a134d8e8005b8b | eb7020c19b42b33c3cbad23af5e2823158cba569 | refs/heads/master | 2020-08-01T07:13:04.259355 | 2019-11-11T21:24:43 | 2019-11-11T21:24:43 | 210,909,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | def toBlackAndWhite(img):
avg = np.mean(img, axis=2)
bw = np.zeros(img.shape)
bw[np.where(avg>150)] = [255,255,255]
return bw
def getGreenChannel(img):
    """Return a copy of *img* keeping only channel 1 (green in RGB).

    Channels 0 and 2 are zeroed; the input array is left untouched.
    """
    green_only = img.copy()
    green_only[:, :, [0, 2]] = 0
    return green_only
def getBrightChannel(img):
return | [
"perintyler@gmail.com"
] | perintyler@gmail.com |
a914aec5310e3f105c8274a919bfec0b29a9cbea | 68ec834c72d32f1c393235035a4bad13f5e7dc46 | /test.py | b268c117867837083a4c96968d409b9ceb152310 | [] | no_license | jordanwesthoff/scantron | da41bbb42d0c243ce3246f129ea1f195d3af3f5b | a06bd8cf256baffa1066966b046d03438510e772 | refs/heads/master | 2021-01-19T05:39:12.664486 | 2015-05-13T22:14:03 | 2015-05-13T22:14:03 | 34,220,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | print 'This is a test.py'
| [
"mkp9617@rit.edu"
] | mkp9617@rit.edu |
6a28e7551bac14d5e50e838a962b64b49a7008ae | 057722b227e9f51c78bd77b622859674016f19dc | /homework4/code/p7/trysvm.py | 8783e7fd91174a983250421516e6938b0597d778 | [] | no_license | walkerning/Homework-pattern_recognition | 56508bc66d0932ad8c9899658d8229169d800551 | 843a79d1f4cc278839ade27a593ae66e603ac4ba | refs/heads/master | 2021-03-19T15:30:55.581932 | 2017-05-31T15:51:22 | 2017-05-31T15:51:22 | 84,166,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | # -*- coding: utf-8 -*-
import numpy as np
from sklearn import svm
samples_w1 = np.array([[-3.0, 0.5, 2.9, -0.1, -4.0, -1.3, -3.4, -4.1, -5.1, 1.9],
[-2.9, 8.7, 2.1, 5.2, 2.2, 3.7, 6.2, 3.4, 1.6, 5.1]]).T
samples_w2 = np.array([[-2.0, -8.9, -4.2, -8.5, -6.7, -0.5, -5.3, -8.7, -7.1, -8.0],
[-8.4, 0.2, -7.7, -3.2, -4.0, -9.2, -6.7, -6.4, -9.7, -6.3]]).T
def transform_data(data):
# return 1 x1 x2 x1**2 x2**2 x1x2
return np.hstack((np.ones((data.shape[0], 1)), data, data**2, (data[:, 0] * data[:, 1])[:, np.newaxis]))
def main():
# set misclassification penalty to a large enough value
trans_samples_w1 = transform_data(samples_w1)
trans_samples_w2 = transform_data(samples_w2)
# data = np.vstack((trans_samples_w1[0, :], trans_samples_w2[0, :]))
# labels = [0, 1]
# res = svm.SVC(C=1e10, kernel="linear").fit(data, labels)
# m = np.sqrt(res.coef_[0].dot(res.coef_[0]))
# margin1 = (res.coef_.dot(trans_samples_w1[0,:]) + res.intercept_) / m
# margin2 = (res.coef_.dot(trans_samples_w2[0,:]) + res.intercept_) / m
# print "margin of w1 {}: {}; margin of w2 {}: {}".format(trans_samples_w1[0, :], margin1,
# trans_samples_w2[0, :], margin2)
for num in range(1, samples_w1.shape[0]+1):
data = np.vstack((trans_samples_w1[:num, :], trans_samples_w2[:num, :]))
labels = np.hstack((np.zeros(num), np.ones(num)))
res = svm.SVC(C=1e10, kernel="linear").fit(data, labels)
print "sample number: {}, coef: {}, b: {}, margin: {}".format(num*2, res.coef_, res.intercept_, np.sqrt(1/(res.coef_[0].dot(res.coef_[0]))))
if __name__ == "__main__":
main()
| [
"foxdoraame@gmail.com"
] | foxdoraame@gmail.com |
2f60880d4c4c0a1f5c94b43bdaba7a41cacd7635 | 08123bd466ec99bc91db6ebdf5023b31ffce0862 | /etl/etl.py | 48432eef0f5bd21dd6b7548a0ac3be140cecf7f6 | [] | no_license | lbedner/rose-of-eternity-legacy-reviews-etl | 3d1a06ff57f05486f3a10dfb7261edfca77c4f24 | 1dbbe150ec8ca1377e8e347354331ff0f638392e | refs/heads/master | 2023-04-28T18:42:57.068118 | 2021-05-01T03:33:44 | 2021-05-01T03:33:44 | 360,008,809 | 0 | 0 | null | 2021-05-01T03:33:44 | 2021-04-21T02:26:21 | Python | UTF-8 | Python | false | false | 5,880 | py | """ETL for legacy Rose of Eternity reviews archived on wayback machine."""
import csv
import datetime
from dateutil import parser
import logging
import os
from typing import Optional
from urllib.parse import ParseResult, urlparse
from bs4 import BeautifulSoup
import psycopg2
import psycopg2.extras
import requests
from . import settings
# Setup logger: optionally write records to settings.LOG_FILE (truncated on
# each run), and always echo formatted records to the console at
# settings.LOG_LEVEL.
if settings.LOG_FILE:
    logging.basicConfig(filename=settings.LOG_FILE, filemode='w')
formatter: logging.Formatter = logging.Formatter(
    '%(asctime)s - %(levelname)s - %(message)s',
)
logger: logging.Logger = logging.getLogger(__name__)
handler: logging.StreamHandler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(settings.LOG_LEVEL)
def create_and_get_db_connection() -> psycopg2.extensions.connection:
    """Open a PostgreSQL connection built from ``settings.DATABASE_URI``.

    Returns:
        A live psycopg2 connection.
    """
    logger.info(f'Connecting to {settings.DATABASE_URI}...')
    uri: ParseResult = urlparse(settings.DATABASE_URI)
    connect_kwargs = {
        'database': uri.path[1:],  # strip the leading '/'
        'user': uri.username,
        'password': uri.password,
        'host': uri.hostname,
        'port': uri.port,
    }
    return psycopg2.connect(**connect_kwargs)
def download_review_page(url: str) -> Optional[str]:
    """Fetch one archived review page over HTTP.

    Args:
        url: Wayback machine URL.

    Returns:
        The page body as text on HTTP 200, otherwise ``None``.
    """
    logger.info(f'Downloading: {url}')
    response: requests.Response = requests.get(url)
    status: int = response.status_code
    if status != 200:
        logger.info(f'Failure - Response Code {status}')
        return None
    logger.info(f'Success - Response Code {status}')
    return response.text
def scrape_review_page(review_page: str) -> list[dict]:
    """Extract every review row from an archived review page.

    Args:
        review_page: HTML representation of the review page.

    Returns:
        Reviews as dicts, ordered earliest-first.
    """
    soup: BeautifulSoup = BeautifulSoup(review_page, 'html.parser')
    review_table: BeautifulSoup = soup.body.center.table
    reviews: list[dict] = []
    for table_row in review_table.find_all('tr'):
        cells: list = table_row.find_all('td')
        if not cells:
            # Header/spacer rows carry no <td> cells.
            continue
        user_cell: BeautifulSoup = cells[0]
        # The profile link looks like '...id=<user_id>'.
        user_id = user_cell.a.attrs['href'].split('id')[1].replace('=', '')
        user_name = user_cell.find('a', href=True).text
        reviews.append({
            'user_id': user_id,
            'user_name': user_name,
            'score': cells[1].text,
            'content': cells[2].text.replace('\n', ''),
            'date': parser.parse(cells[3].text).strftime('%Y-%m-%d'),
        })
    logger.info(f'Scraped {len(reviews)} Reviews!')
    # The page lists newest first; flip so the earliest review comes first.
    reviews.reverse()
    return reviews
def write_reviews_to_tsv(url: str, reviews: list[dict]) -> Optional[str]:
    """Write reviews to a TSV file in the configured output directory.

    Args:
        url: Wayback machine URL; its basename determines the TSV filename.
        reviews: Scraped reviews.

    Returns:
        Name of the TSV file, or ``None`` when there are no reviews to write.
    """
    # Nothing to write: honor the Optional contract instead of raising
    # IndexError on reviews[0] below.
    if not reviews:
        return None
    # Create TSV filename based off of Wayback Machine URL
    # and output directory
    tsv_filename: str = os.path.basename(url).replace('html', 'tsv')
    tsv_filename = os.path.join(settings.REVIEWS_OUTPUT_FOLDER, tsv_filename)
    # Write TSV file. newline='' is required by the csv module so the writer
    # controls line endings itself (avoids doubled '\r' on Windows).
    logger.info(f'Writing {tsv_filename}')
    with open(tsv_filename, 'w', newline='') as output_file:
        dict_writer: csv.DictWriter = csv.DictWriter(
            output_file,
            fieldnames=reviews[0].keys(),
            delimiter='\t',
        )
        dict_writer.writeheader()
        dict_writer.writerows(reviews)
    # The original had an unreachable `return None` fallback here; the
    # success path always returns the filename.
    return tsv_filename
def import_reviews(
    tsv_filename: str,
    conn: psycopg2.extensions.connection,
    cur: psycopg2.extras.RealDictCursor
) -> None:
    """Import reviews into the database.

    Args:
        tsv_filename: Name of TSV file containing reviews.
        conn: Open database connection; committed after the bulk insert.
        cur: Cursor used to run the PostgreSQL COPY.
    """
    # Bulk insert review file
    logger.info(f'Bulk inserting {tsv_filename}...')
    with open(tsv_filename, 'r') as f:
        next(f)  # skip the TSV header row; COPY expects data lines only
        cur.copy_from(
            f,
            settings.REVIEW_TABLE,
            sep='\t',
            columns=('user_id', 'user_name', 'score', 'content', 'date')
        )
    conn.commit()
if __name__ == '__main__':
    # End-to-end pipeline: wipe the reviews table, then for each archived
    # URL download -> scrape -> export to TSV -> bulk import.
    start: datetime.datetime = datetime.datetime.now()
    # Clean the database
    logger.info('Cleaning the database...')
    conn: psycopg2.extensions.connection = create_and_get_db_connection()
    cur: psycopg2.extras.RealDictCursor = conn.cursor()
    # TRUNCATE drops previously imported reviews so each run starts fresh
    cur.execute(f'TRUNCATE TABLE {settings.REVIEW_TABLE}')
    conn.commit()
    # Iterate through all the review URL's
    for url in settings.URLS:
        logger.info('')  # blank log line to separate per-URL output
        # Download and store reviews (HTML)
        review_page: Optional[str] = download_review_page(url)
        # Scrape the review page for reviews
        if review_page:
            reviews: list[dict] = scrape_review_page(review_page)
            # Export reviews to TSV file
            tsv_filename: Optional[str] = write_reviews_to_tsv(url, reviews)
            # Bulk import the TSV file to database
            if tsv_filename:
                import_reviews(tsv_filename, conn, cur)
    conn.close()
    logger.info('')
    logger.info(f'Running Time: {str(datetime.datetime.now() - start)}')
| [
"LeonardBedner@iheartmedia.com"
] | LeonardBedner@iheartmedia.com |
431a60378e86b4b85d841143ab2f513bb7bbeeff | 1b5cc8dc487da59455dfe6749796870d51d5ab87 | /src/collective/iptvusp/tests/test_uspvideo.py | 72b74796685ac00b3964064bc7a733813671c2c5 | [] | no_license | simplesconsultoria/collective.iptvusp | eddcd726a800933127b04959bba90c63210049dc | 89b14ee4a01e19ef5cd7198c5bdf808ef555f1f0 | refs/heads/master | 2021-01-01T18:29:41.272115 | 2013-03-12T19:01:25 | 2013-03-12T19:01:25 | 6,388,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | # -*- coding: utf-8 -*-
import unittest2 as unittest
from zope.component import createObject
from zope.component import queryUtility
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from plone.dexterity.interfaces import IDexterityFTI
from plone.app.dexterity.behaviors.exclfromnav import IExcludeFromNavigation
from collective.iptvusp.content import IUSPVideo
from collective.iptvusp.testing import INTEGRATION_TESTING
class CoverIntegrationTestCase(unittest.TestCase):
    """Integration tests for the iptvusp.uspvideo Dexterity content type."""

    # Plone test layer that provides the portal fixture.
    layer = INTEGRATION_TESTING

    def setUp(self):
        # Need Manager role to create the container folder, then drop back
        # to Member to mimic a regular user creating content inside it.
        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        self.portal.invokeFactory('Folder', 'test-folder')
        setRoles(self.portal, TEST_USER_ID, ['Member'])
        self.folder = self.portal['test-folder']
        self.folder.invokeFactory('iptvusp.uspvideo', 'c1',
                                  template_layout='Layout A')
        self.c1 = self.folder['c1']

    def test_adding(self):
        # Object created through invokeFactory must provide the type schema.
        self.assertTrue(IUSPVideo.providedBy(self.c1))

    def test_fti(self):
        # The Factory Type Information must be registered for the type name.
        fti = queryUtility(IDexterityFTI,
                           name='iptvusp.uspvideo')
        self.assertNotEqual(None, fti)

    def test_schema(self):
        # The FTI's looked-up schema must be the IUSPVideo interface.
        fti = queryUtility(IDexterityFTI,
                           name='iptvusp.uspvideo')
        schema = fti.lookupSchema()
        self.assertEqual(IUSPVideo, schema)

    def test_factory(self):
        # Objects built from the registered factory must provide IUSPVideo.
        fti = queryUtility(IDexterityFTI,
                           name='iptvusp.uspvideo')
        factory = fti.factory
        new_object = createObject(factory)
        self.assertTrue(IUSPVideo.providedBy(new_object))

    def test_exclude_from_navigation_behavior(self):
        # The type enables the "exclude from navigation" Dexterity behavior.
        self.assertTrue(IExcludeFromNavigation.providedBy(self.c1))
| [
"erico@simplesconsultoria.com.br"
] | erico@simplesconsultoria.com.br |
615158eadfcbf02124c5f25128a0cf68ffa28514 | c2ef2deccd5319ad291317a16e8cb96fe1cf26dd | /Sizing_FrozenAug26/engine.py | 7106895f1511c8ae22babd9e6c98760bff103be8 | [
"MIT"
] | permissive | icl-rocketry/optimalSizing | 9045aef128f7c3b339bd6ee251dd29f873d34cd5 | c23f5a84bc9f46cf86977ec7da97dbf7126dcb1b | refs/heads/master | 2020-07-02T13:53:59.344763 | 2019-11-06T00:54:31 | 2019-11-06T00:54:31 | 201,545,820 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,064 | py | import gpkit
from gpkit import Model, Variable
from gpkit.constraints.tight import Tight
from gpkit.constraints.loose import Loose
from gpkit.constraints.bounded import Bounded
from gpkit import ureg
class SimpleEngine(Model):
    """Monolithic GP sizing model of the hybrid engine: propellant
    bookkeeping at a fixed O/F ratio, plus first-order ox-tank and
    fuel-grain geometry/mass sizing."""

    def setup(self):
        constraints = []
        components = []
        ######## components ###########
        m = self.m = Variable("m", "kg", "Mass of Engine")
        # components is always empty here, so this roll-up is skipped;
        # kept for structural symmetry with the other models.
        if len(components) > 0:
            constraints += [Tight([m >= sum(comp.m for comp in components)])]
        ########## constraints #########
        m_prop = self.m_prop = Variable("m_{prop}", "kg", "Mass of Propellant")
        m_dry = self.m_dry = Variable("m_{dry}", "kg", "Dry mass of engine")
        # constraints += [m_dry >= 0.7 * m]
        c = self.c = Variable("c", 2100, "m/s", "effective exhaust speed of engine")
        F = self.F = Variable("F", 750, "N", "Engine thrust")
        OF = self.OF = Variable("OF", 6, "", "Ox to fuel ratio")
        m_ox = self.m_ox = Variable("m_{ox}", "kg", "ox mass")
        m_fuel = self.m_fuel = Variable("m_{fuel}", "kg", "fuel mass")
        # Propellant mass split between ox and fuel at the fixed O/F ratio;
        # Tight() makes the GP relaxations bind at the optimum.
        constraints += [Tight([m_prop >= m_ox + m_fuel])]
        constraints += [Tight([m_fuel * (OF + 1) >= m_prop])]
        # constraints += [m_fuel * (OF + 1) <= m_prop]
        constraints += [Tight([m_ox * (1 / OF + 1) >= m_prop])]
        # constraints += [m_ox * (1 / OF + 1) <= m_prop]
        constraints += [Tight([m >= m_prop + m_dry])]
        # size the ox tank
        v_ox = self.v_ox = Variable("V_{ox}", "cm^3", "Volume of ox tank")
        l_ox = self.l_ox = Variable("L_{ox}", "m", "Length of ox tank")
        t_wall = self.t = Variable("t_{wall}", "mm", "Wall Thickness of ox tank")
        d = self.d = Variable("d_ox", 15, "cm", "Diameter of ox tank")
        P_ox = self.P = Variable("Tank P", 80, "bar", "Max Ox Tank pressure")
        sigma_max = Variable("\sigma_{max}", 430, "MPa", "Max stress of tank, Al-7075-T6")
        # determine the wall thickness needed (thin-walled hoop-stress formula)
        SF = Variable("SF", 5, "", "Wall thickness safety factor")
        constraints += [t_wall >= SF * P_ox * d / (2 * sigma_max)]
        # determine volume required
        # R = Variable("R", 8.314, "J/mol/K", "Ideal gas constant")
        # T = Variable("T", 350, "K", "Tank temperature ")
        # MM = Variable("MM", 44.1, "g/mol", "Molar mass of Nitrous")
        rho_ox = Variable("rho_{ox}", 490, "kg/m^3", "density of liquid ox")
        constraints += [v_ox >= (m_ox / rho_ox)]
        # determine length of ox tank (cylinder of diameter d)
        constraints += [l_ox >= 4 * v_ox / (3.14 * d ** 2)]
        m_ox_tank = Variable("m_{ox tank}", "kg", "Mass of ox tank")
        rho_tank = Variable("rho_{ox, tank}", 2700, "kg/m^3", "Density of ox tank (if al)")
        # NOTE(review): comment mentions a factor of 2 for endcaps/safety but
        # no such factor appears in the expression -- confirm intent.
        constraints += [m_ox_tank >= rho_tank * (3.14 * d * l_ox * t_wall)]  # the 2 is for safety factor and endcaps
        # grain tank sizing
        m_grain_tank = Variable("m_{grain tank}", "kg", "Mass of grain tank")
        rho_fuel = Variable("rho_{wax}", 900, "kg/m^3", "Density of fuel")
        v_fuel = Variable("v_{fuel}", "cm^3", "Volume of fuel")
        constraints += [Tight([v_fuel >= m_fuel/rho_fuel])]
        # estimate port such that the grain area is half the cross section area
        A_grain = Variable("A_{grain}", "cm^2","cross section area of grain")
        constraints += [Tight([A_grain <= 0.5*3.14*(d/2)**2])]
        # estimate length
        l_grain = Variable("L_{grain}", "m", "Length of the grain")
        constraints += [l_grain >= v_fuel/A_grain]
        # estimate mass, assuming the thickness is the same as the ox
        constraints += [Tight([m_grain_tank >= rho_tank * (3.14 * d * l_grain * t_wall)])]
        m_valves = Variable("m_{valves}", 1, "kg", "Mass of valves and plumbing")
        m_nozzle = Variable("m_{nozzle}", 1, "kg", "Mass of nozzle assembly")
        constraints += [Tight([m_dry >= m_ox_tank + m_valves + m_grain_tank + m_nozzle])]
        # impose bounds (Loose() warns if these bounds bind at the optimum)
        constraints += [Loose([l_ox >= 0.5 * ureg.m, l_ox <= 2 * ureg.m])]
        constraints += [Loose([t_wall >= 1 * ureg.mm, t_wall <= 20 * ureg.mm])]
        return [components, constraints]
class Engine(Model):
    """Top-level component-based engine model: sums sub-assembly masses and
    ties propellant mass to the ox and fuel sub-models."""

    def setup(self):
        # Instantiate the four sub-assemblies that make up the engine.
        self.ox_assembly = ox_assembly = EngineOxAssembly()
        self.valve_assembly = valve_assembly = EngineValveAssembly()
        self.fuel_assembly = fuel_assembly = EngineFuelAssembly()
        self.nozzle_assembly = nozzle_assembly = EngineNozzleAssembly()
        self.components = components = [
            ox_assembly, valve_assembly, fuel_assembly, nozzle_assembly
        ]

        self.m = m = Variable("m", "kg", "Mass of Engine")
        constraints = []
        if components:
            # Engine mass is at least the sum of its component masses.
            constraints.append(Tight([m >= sum(comp.m for comp in components)]))

        # constraints += [m >= 6 * ureg.kg]
        # Propellant mass bounded below by ox mass plus fuel mass.
        self.m_prop = m_prop = Variable("m_{prop}", "kg", "Propellant Mass")
        constraints.append(Tight([m_prop >= ox_assembly.ox.m + fuel_assembly.fuel.m]))

        # define by mass fraction
        # constraints += [m >= m_prop/0.3]
        c = Variable("c", 2000, "m/s", "Main engine effective exhaust speed")
        return [constraints, components]
class EngineOxAssembly(Model):
    """Oxidiser assembly: tank plus oxidiser, with a 2 kg mass floor."""

    def setup(self):
        self.oxtank = EngineOxTank()
        self.ox = EngineOx()
        self.components = components = [self.oxtank, self.ox]

        self.m = Variable("m", "kg", "Mass of Engine Tank")
        constraints = []
        if components:
            constraints.append(Tight([self.m >= sum(part.m for part in components)]))
        constraints.append(self.m >= 2 * ureg.kg)
        return [components, constraints]
class EngineOxTank(Model):
    """Placeholder ox-tank model: a single mass variable floored at 2 kg."""

    def setup(self):
        # No sub-components yet; list kept for symmetry with other models.
        self.components = components = []
        self.m = Variable("m", "kg", "Mass of Engine Tank")
        constraints = []
        if components:
            constraints.append(Tight([self.m >= sum(part.m for part in components)]))
        constraints.append(self.m >= 2 * ureg.kg)
        return [components, constraints]
class EngineOx(Model):
    """Placeholder oxidiser model: a single mass variable floored at 2 kg."""

    def setup(self):
        # No sub-components yet; list kept for symmetry with other models.
        self.components = components = []
        self.m = Variable("m", "kg", "Mass of Engine Tank")
        constraints = []
        if components:
            constraints.append(Tight([self.m >= sum(part.m for part in components)]))
        constraints.append(self.m >= 2 * ureg.kg)
        return [components, constraints]
class EngineValveAssembly(Model):
    """Placeholder valve assembly: a single mass variable floored at 2 kg."""

    def setup(self):
        # No sub-components yet; list kept for symmetry with other models.
        self.components = components = []
        self.m = Variable("m", "kg", "Mass of Engine Tank")
        constraints = []
        if components:
            constraints.append(Tight([self.m >= sum(part.m for part in components)]))
        constraints.append(self.m >= 2 * ureg.kg)
        return [components, constraints]
class EngineFuelAssembly(Model):
    """Fuel assembly: fuel grain plus its enclosure, with a 2 kg mass floor."""

    def setup(self):
        self.fuel = EngineFuel()
        self.enclosure = EngineFuelEnclosure()
        self.components = components = [self.fuel, self.enclosure]

        self.m = Variable("m", "kg", "Mass of Engine Tank")
        constraints = []
        if components:
            constraints.append(Tight([self.m >= sum(part.m for part in components)]))
        constraints.append(self.m >= 2 * ureg.kg)
        return [components, constraints]
class EngineFuel(Model):
    """Placeholder fuel-grain model: a single mass variable floored at 2 kg."""

    def setup(self):
        # No sub-components yet; list kept for symmetry with other models.
        self.components = components = []
        self.m = Variable("m", "kg", "Mass of Engine Tank")
        constraints = []
        if components:
            constraints.append(Tight([self.m >= sum(part.m for part in components)]))
        constraints.append(self.m >= 2 * ureg.kg)
        return [components, constraints]
class EngineFuelEnclosure(Model):
    """Placeholder fuel-enclosure model: one mass variable floored at 2 kg."""

    def setup(self):
        # No sub-components yet; list kept for symmetry with other models.
        self.components = components = []
        self.m = Variable("m", "kg", "Mass of Engine Tank")
        constraints = []
        if components:
            constraints.append(Tight([self.m >= sum(part.m for part in components)]))
        constraints.append(self.m >= 2 * ureg.kg)
        return [components, constraints]
class EngineNozzleAssembly(Model):
    """Placeholder nozzle assembly: a single mass variable floored at 2 kg."""

    def setup(self):
        # No sub-components yet; list kept for symmetry with other models.
        self.components = components = []
        self.m = Variable("m", "kg", "Mass of Engine Tank")
        constraints = []
        if components:
            constraints.append(Tight([self.m >= sum(part.m for part in components)]))
        constraints.append(self.m >= 2 * ureg.kg)
        return [components, constraints]
| [
"dra16@ic.ac.uk"
] | dra16@ic.ac.uk |
22d1c6a2d75f0cc23740d21dd8851259c7042728 | 8f5eb43f1331d08ffd7ba7617313f9b3823ab36e | /Profiling/src/instrumentation.py | bb3454feaaa722b662eced4914135c98dd112e67 | [] | no_license | felipebetancur/WCET-1 | d68ebcd50388116d001b0ca5a495d8b7e838224d | e148e4d564c6df5488a390de888bfb1b5fedab61 | refs/heads/master | 2021-01-22T13:53:00.787666 | 2015-03-13T14:27:25 | 2015-03-13T14:27:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,656 | py | import udraw
import config
import vertices
import random
def remove_vertices(enhanced_cfg, to_remove):
    """Delete every vertex in *to_remove* from the enhanced CFG."""
    for doomed in to_remove:
        enhanced_cfg.remove_vertex(doomed)
def relink_vertex_predecessors_to_successors(enhanced_cfg, v):
    """Bypass *v*: connect each of its predecessors directly to each of its
    successors, adding only edges that do not already exist."""
    for source in v.predecessors.keys():
        for target in v.successors.keys():
            if enhanced_cfg.has_edge(source, target):
                continue
            enhanced_cfg.add_edge(source, target)
def eliminate_program_points_which_are_not_profiled(enhanced_cfg, program_points_to_profile):
    """Strip from the enhanced CFG every vertex that does not correspond to
    a profiled program point, rewiring predecessors to successors around
    each removed vertex.

    Args:
        enhanced_cfg: CFG containing both basic-block vertices and CFGEdge
            vertices; its entry and exit vertices are always preserved.
        program_points_to_profile: mixture of vertex IDs and (src, dst)
            edge tuples that must survive.
    """
    # Partition the requested program points into vertices and edges;
    # edge endpoints are recorded too so profiled edges stay anchored.
    vertices_to_profile = set()
    edges_to_profile = set()
    edge_endpoints = set()
    for a_program_point in program_points_to_profile:
        if isinstance(a_program_point, tuple):
            edges_to_profile.add(a_program_point)
            edge_endpoints.add(a_program_point[0])
            edge_endpoints.add(a_program_point[1])
        else:
            vertices_to_profile.add(a_program_point)
    to_remove = set()
    for v in enhanced_cfg:
        # Entry and exit vertices are never removed.
        if v.vertexID != enhanced_cfg.entryID \
        and v.vertexID != enhanced_cfg.exitID:
            if isinstance(v, vertices.CFGEdge):
                # A CFGEdge survives if either endpoint or the edge itself
                # is profiled.
                if v.edge[0] not in vertices_to_profile \
                and v.edge[1] not in vertices_to_profile \
                and v.edge not in edges_to_profile:
                    to_remove.add(v)
                    relink_vertex_predecessors_to_successors(enhanced_cfg, v)
            else:
                # A basic-block vertex survives if it is profiled directly
                # or anchors a profiled edge.
                if v.vertexID not in vertices_to_profile \
                and v.vertexID not in edge_endpoints:
                    to_remove.add(v)
                    relink_vertex_predecessors_to_successors(enhanced_cfg, v)
    remove_vertices(enhanced_cfg, to_remove)
def eliminate_cfg_edge_vertices(enhanced_cfg):
    """Remove every CFG-edge vertex (keeping entry and exit), rewiring
    predecessors to successors around each removal."""
    doomed = set()
    for candidate in enhanced_cfg:
        if candidate.vertexID == enhanced_cfg.entryID:
            continue
        if candidate.vertexID == enhanced_cfg.exitID:
            continue
        if isinstance(candidate, vertices.CFGEdge):
            doomed.add(candidate)
            relink_vertex_predecessors_to_successors(enhanced_cfg, candidate)
    remove_vertices(enhanced_cfg, doomed)
def eliminate_cfg_vertex_vertices(enhanced_cfg):
    """Remove every plain CFG vertex (keeping entry, exit and CFGEdge
    vertices), rewiring predecessors to successors around each removal."""
    doomed = set()
    for candidate in enhanced_cfg:
        if candidate.vertexID == enhanced_cfg.entryID:
            continue
        if candidate.vertexID == enhanced_cfg.exitID:
            continue
        if not isinstance(candidate, vertices.CFGEdge):
            doomed.add(candidate)
            relink_vertex_predecessors_to_successors(enhanced_cfg, candidate)
    remove_vertices(enhanced_cfg, doomed)
def reduce_enhanced_cfg(enhanced_cfg):
    """Remove redundant non-entry/exit vertices from the enhanced CFG.

    Vertices are visited in random order; a vertex is removed (with its
    predecessors rewired to its successors) only when no
    predecessor->successor shortcut edge already exists.
    """
    # Materialise the vertices into a list: on Python 3 dict_values is a
    # view, which random.shuffle cannot index, and iterating a live view
    # while remove_vertex() mutates the dict raises RuntimeError.
    vertex_list = list(enhanced_cfg.the_vertices.values())
    random.shuffle(vertex_list)
    for v in vertex_list:
        if v.vertexID != enhanced_cfg.entryID \
        and v.vertexID != enhanced_cfg.exitID:
            remove_vertex = True
            for predID in v.predecessors.keys():
                for succID in v.successors.keys():
                    if enhanced_cfg.has_edge(predID, succID):
                        remove_vertex = False
            if remove_vertex:
                relink_vertex_predecessors_to_successors(enhanced_cfg, v)
                enhanced_cfg.remove_vertex(v)
def do_instrumentation(cfg, program_points_to_profile):
    """Build a reduced, instrumented enhanced CFG for *cfg* and dump it.

    Depending on config.Arguments.instrument, instrumentation is restricted
    to vertices only or edges only; then only the requested program points
    are kept, the graph is reduced, and the result is written as a uDraw
    file named "<name>.enhanced".
    """
    enhanced_cfg = cfg.get_enhanced_cfg()
    if config.Arguments.instrument == "vertices":
        # Vertex profiling: CFG-edge vertices carry no information here.
        eliminate_cfg_edge_vertices(enhanced_cfg)
    elif config.Arguments.instrument == "edges":
        # Edge profiling: plain CFG vertices carry no information here.
        eliminate_cfg_vertex_vertices(enhanced_cfg)
    eliminate_program_points_which_are_not_profiled(enhanced_cfg, program_points_to_profile)
    reduce_enhanced_cfg(enhanced_cfg)
    udraw.make_file(enhanced_cfg, "%s.enhanced" % enhanced_cfg.name)
| [
"a.betts@imperial.ac.uk"
] | a.betts@imperial.ac.uk |
f2212a99bb2cf06fa216bca71f657bb19d3a6cd2 | 553f801bea01707f30b39846d008759237cb826e | /Scripts/rename.py | 54d2af317072927846a6ca9b0f1dc4622d1b2777 | [
"MIT"
] | permissive | ChristopherStavros/General | 55241e5823df2fc763a3103c3c2ade2fa833301d | 7701fee0a2db8a8f4b2cfe80e57eb10ed86a89a8 | refs/heads/master | 2020-03-26T01:02:59.572695 | 2019-05-12T21:37:17 | 2019-05-12T21:37:17 | 144,349,708 | 0 | 1 | null | 2019-01-02T18:36:46 | 2018-08-11T02:40:03 | PowerShell | UTF-8 | Python | false | false | 403 | py | import os, sys, shutil
# Copy every "notes_*" file in this script's directory to a name without
# the "notes_" prefix (copy2 preserves metadata; originals are kept).
#
# abspath fixes two defects in the old code: os.path.dirname(sys.argv[0])
# is '' when the script is run as a bare filename (breaking os.listdir),
# and the old strip('\\') also removed LEADING backslashes, mangling UNC
# paths such as \\server\share.
script_path = os.path.dirname(os.path.abspath(sys.argv[0]))

for entry in os.listdir(script_path):
    if 'notes_' in entry:
        shutil.copy2(os.path.join(script_path, entry),
                     os.path.join(script_path, entry.replace('notes_', '')))
"christopher.kurkoski@gmail.com"
] | christopher.kurkoski@gmail.com |
b847d5a37a190f795512d1c3a3ab13e6d4fcebb6 | 1472b262cb8a3032abcfb51cf3f2a9e094cfee70 | /FirstPage.py | a58769afbbfd4bf983dcdabd4bd53afc1d67e8fb | [] | no_license | kusmakharpathak/system_admin_group | 5413c80a550a2da78a899f3c37a8ffa261ec86d3 | 725cdaea6847db0946c5d819a5090feacdbebee5 | refs/heads/main | 2023-03-11T13:12:12.458059 | 2021-02-25T19:40:12 | 2021-02-25T19:40:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,962 | py | from tkinter import *
from tkinter.ttk import Combobox
w = Tk()
w.geometry("900x700")
w['background']='#6EB69E'
f1 = Frame(w,width = 100,height = 100,background = '#E0F1F0',highlightthickness = 3)
f1.grid(row=0,column=0,ipadx = 10,ipady=10,padx=1,pady=1)
admin = Label(f1,text="Admin",fg="black",bg='#E0F1F0', font=("Arial Bold", 50))
admin.grid(row=0,column=0,ipadx = 10,ipady=10,padx=1,pady=1)
f2 =Frame(w,width = 700,height = 100,background = '#A6CBCB',highlightthickness = 3)
f2.grid(row=0,column=1,ipadx = 10,ipady=10,padx=1,pady=1)
cn = Label(f2,text="Company Name",fg="yellow",bg="#A6CBCB",font=("Arial Bold", 50))
cn.grid(row=0,column=1,ipadx = 10,ipady=10,padx=1,pady=1)
f3 =Frame(w,background = '#6EB69E',highlightthickness = 3)
f3.grid(row=1,column=0,ipadx = 1,ipady=1,padx=1,pady=1)
db = Button(f3,text="Dashboard",fg="white",bg="#726F71", font=("Arial Bold", 20), width = 9)
db.grid(row=1,column=1,ipadx = 10,ipady=10,padx=1,pady=1)
q = Button(f3,text="Queries",fg="white",bg="#726F71", font=("Arial Bold", 20), width = 9)
q.grid(row=2,column=1,ipadx = 10,ipady=10,padx=1,pady=1)
login = Button(f3,text="Login",fg="white",bg="#726F71", font=("Arial Bold", 20), width = 9)
login.grid(row=3,column=1,ipadx = 10,ipady=10,padx=1,pady=1)
b = Label(f3,text="",bg="#6EB69E", font=("Arial Bold", 20))
b.grid(row=4,column=1,ipadx = 10,ipady=10,padx=1,pady=1)
b1 = Label(f3,text="",bg="#6EB69E", font=("Arial Bold", 20))
b1.grid(row=5,column=1,ipadx = 10,ipady=10,padx=1,pady=1)
logout = Button(f3,text="Logout",fg="white",bg="#C7D4D9", font=("Arial Bold", 20), width = 9)
logout.grid(row=6,column=1,ipadx = 10,ipady=10,padx=1,pady=1)
quit = Button(f3,text="Quit",fg="white",bg="#C7D4D9", font=("Arial Bold", 20), width = 9)
quit.grid(row=7,column=1,ipadx = 10,ipady=10,padx=1,pady=1)
f4 =Frame(w,background = '#6EB69E',highlightthickness = 3)
f4.grid(row=1,column=1,ipadx = 1,ipady=1,padx=1,pady=1)
t1 = Text(f4,height=2,width=70)
t1.grid(row=1, column=0)
t1.insert(END,"Login By -")
t2 = Text(f4,height=2,width=70)
t2.grid(row=2, column=0)
t3 =Text(f4,height=2,width=70)
t3.grid(row=3, column=0)
t4 = Text(f4,height=2,width=70)
t4.grid(row=4, column=0)
t5 = Text(f4,height=2,width=70)
t5.grid(row=5, column=0)
t6 = Text(f4,height=2,width=70)
t6.grid(row=6, column=0)
t7 = Text(f4,height=2,width=70)
t7.grid(row=7, column=0)
t8 = Text(f4,height=2,width=70)
t8.grid(row=8, column=0)
t9 = Text(f4,height=2,width=70)
t9.grid(row=9, column=0)
t0 = Text(f4,height=2,width=70)
t0.grid(row=10, column=0)
t10 = Text(f4,height=2,width=70)
t10.grid(row=11, column=0)
t11 = Text(f4,height=2,width=70)
t11.grid(row=12, column=0)
t2.insert(END,"Login By -")
t3.insert(END,"Login By -")
t4.insert(END,"Login By -")
t5.insert(END,"Login By -")
t6.insert(END,"Login By -")
t7.insert(END,"Login By -")
t8.insert(END,"Login By -")
t9.insert(END,"Login By -")
t0.insert(END,"Login By -")
t10.insert(END,"Login By -")
t11.insert(END,"Login By -")
w.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
0d112d9d22bf6d95c471ac0daf7bbcf566c2e834 | 1af288c99de84ca38b0df3469fa468b45473eb13 | /src/globals/constants.py | 7e84bee181145119688cf72d20eace0f6991bcbb | [] | no_license | bhavye9499/DIP-Project-MangaColorization | 3aea11bef39d4f7b142c1929d929a337ccebd3d6 | ae6b1f9f6c75f78c874414da5508be2dc4880198 | refs/heads/main | 2023-05-31T20:43:17.351616 | 2021-06-29T20:08:39 | 2021-06-29T20:08:39 | 317,637,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | import enum
COMMON_COLORS = (
'--Select--',
'Black',
'Blue',
'Brown',
'Green',
'Hot-Pink',
'Mustard',
'Orange',
'Peach',
'Purple',
'Red',
'White',
'Yellow',
)
COMMON_COLORS_HEX_CODES = {
'Black': '#000000',
'Blue': '#0000ff',
'Brown': '#964b00',
'Green': '#00ff00',
'Hot-Pink': '#ff69b4',
'Mustard': '#eaaa00',
'Orange': '#ffa500',
'Peach': '#ffe5b4',
'Purple': '#800080',
'Red': '#ff0000',
'White': '#ffffff',
'Yellow': '#ffff00',
}
FILE_TYPES = [
('All files', '*'),
('BMP files', '*.bmp'),
('JPG files', '*.jpg'),
('JPEG files', '*.jpeg'),
('PNG files', '*.png'),
]
EVENT_FLAG_ALTKEY = 'Alt'
EVENT_FLAG_CTRLKEY = 'Control'
EVENT_FLAG_ENTERKEY = 'Return'
EVENT_FLAG_KEYPRESS = 'Key'
EVENT_FLAG_SHIFTKEY = 'Shift'
EVENT_LBUTTONDOWN = 'ButtonPress'
EVENT_LBUTTONUP = 'ButtonRelease'
EVENT_MOUSEMOVE = 'Motion'
FORMAT_JPG = 'jpg'
FORMAT_JPEG = 'jpeg'
FORMAT_PNG = 'png'
class Colorization(enum.Enum):
    """Supported colorization strategies (auto() yields the same 1..3)."""
    color_replacement = enum.auto()
    pattern_to_shading = enum.auto()
    stroke_preserving = enum.auto()
class PixelType(enum.Enum):
    """Role of a pixel during region growing (auto() yields the same 1..2)."""
    start_pixel = enum.auto()
    region_pixel = enum.auto()
class Region(enum.Enum):
    """Kind of image region being processed (auto() yields the same 1..2)."""
    intensity = enum.auto()
    pattern = enum.auto()
| [
"bhavye17038@iiitd.ac.in"
] | bhavye17038@iiitd.ac.in |
4e9e117b4208da744ba02a9299faa7b6caea4145 | 45d17ca56a111c550272214ee555e91f3cf8ea08 | /ERROR404/QuestionDiagonosisTkinter.py | 047c35e2258ad369b066d131e6b96af5a57995ec | [] | no_license | bipul2002star/HealthCare-Jarvis-bot | d0ad0dc4979a87bbde7b7090ce44781d43f35611 | 8e2c9fcce2173ab2ce9a68f86bc74aa1ed1b49fd | refs/heads/main | 2023-04-12T08:45:42.956875 | 2021-04-25T12:29:02 | 2021-04-25T12:29:02 | 361,421,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,582 | py | from tkinter import *
from tkinter import messagebox
import os
import webbrowser
import numpy as np
import pandas as pd
class HyperlinkManager:
    """Make spans of a Tk Text widget behave like clickable hyperlinks.

    Each add() call registers a callback under a unique "hyper-<n>" tag;
    clicking text inserted with that tag invokes the callback.
    """

    def __init__(self, text):
        self.text = text
        self.text.tag_config("hyper", foreground="sienna2", underline=1)
        self.text.tag_bind("hyper", "<Enter>", self._enter)
        self.text.tag_bind("hyper", "<Leave>", self._leave)
        self.text.tag_bind("hyper", "<Button-1>", self._click)
        self.reset()

    def reset(self):
        # Drop every registered callback.
        self.links = {}

    def add(self, action):
        """Register *action*; return the tag pair to pass to Text.insert()."""
        tag = f"hyper-{len(self.links)}"
        self.links[tag] = action
        return "hyper", tag

    def _enter(self, event):
        # Show a hand cursor while hovering a link.
        self.text.config(cursor="hand2")

    def _leave(self, event):
        self.text.config(cursor="")

    def _click(self, event):
        # Fire the first hyper-* tag found under the mouse position.
        for tag in self.text.tag_names(CURRENT):
            if tag.startswith("hyper-"):
                self.links[tag]()
                return
# Importing the dataset
# Importing the dataset (132 symptom columns + a 'prognosis' label column)
training_dataset = pd.read_csv('Training.csv')
test_dataset = pd.read_csv('Testing.csv')  # NOTE(review): loaded but unused below
# Slicing and Dicing the dataset to separate features from predictions
X = training_dataset.iloc[:, 0:132].values
Y = training_dataset.iloc[:, -1].values
# Dimensionality Reduction for removing redundancies: per-disease maximum
# of every symptom column, indexed by prognosis
dimensionality_reduction = training_dataset.groupby(training_dataset['prognosis']).max()
# Encoding String values to integer constants
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
y = labelencoder.fit_transform(Y)
# Splitting the dataset into training set and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Implementing the Decision Tree Classifier (default hyperparameters)
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier()
classifier.fit(X_train, y_train)
# Saving the information of columns (symptom names only, label dropped)
cols = training_dataset.columns
cols = cols[:-1]
# Checking the Important features, sorted most-important first
importances = classifier.feature_importances_
indices = np.argsort(importances)[::-1]
features = cols
# Implementing the Visual Tree
from sklearn.tree import _tree
# Method to simulate the working of a Chatbot by extracting and formulating questions
def print_disease(node):
    """Map a decision-tree leaf value array onto the disease label(s) it
    encodes, using the module-level fitted labelencoder."""
    leaf = node[0]
    hit_indices = leaf.nonzero()
    return labelencoder.inverse_transform(hit_indices[0])
def recurse(node, depth):
    """Generator that walks the fitted decision tree one question at a time.

    Yields the next symptom question as a string; the GUI stores the answer
    in the module-level *ans* before calling __next__() again.  At a leaf it
    writes the diagnosis, symptom lists, confidence and a doctor link into
    the QuestionDigonosis text widget, then yields a final summary string.
    Relies on module globals primed by tree_to_code().
    """
    global val,ans
    global tree_,feature_name,symptoms_present
    indent = " " * depth  # NOTE(review): computed but never used
    if tree_.feature[node] != _tree.TREE_UNDEFINED:
        # Internal node: ask about this feature, then descend on the answer.
        name = feature_name[node]
        threshold = tree_.threshold[node]
        yield name + " ?"
        # ans = input()
        ans = ans.lower()
        if ans == 'yes':
            val = 1
        else:
            val = 0
        if val <= threshold:
            yield from recurse(tree_.children_left[node], depth + 1)
        else:
            symptoms_present.append(name)
            yield from recurse(tree_.children_right[node], depth + 1)
    else:
        # Leaf node: report the diagnosis into the GUI text widget.
        strData=""
        present_disease = print_disease(tree_.value[node])
        # print( "You may have " + present_disease )
        # print()
        strData="You may have :" + str(present_disease)
        QuestionDigonosis.objRef.txtDigonosis.insert(END,str(strData)+'\n')
        # Symptoms the dataset associates with this disease (nonzero columns).
        red_cols = dimensionality_reduction.columns
        symptoms_given = red_cols[dimensionality_reduction.loc[present_disease].values[0].nonzero()]
        # print("symptoms present " + str(list(symptoms_present)))
        # print()
        strData="symptoms present: " + str(list(symptoms_present))
        QuestionDigonosis.objRef.txtDigonosis.insert(END,str(strData)+'\n')
        # print("symptoms given " + str(list(symptoms_given)) )
        # print()
        strData="symptoms given: " + str(list(symptoms_given))
        QuestionDigonosis.objRef.txtDigonosis.insert(END,str(strData)+'\n')
        # Confidence = fraction of the disease's known symptoms confirmed.
        confidence_level = (1.0*len(symptoms_present))/len(symptoms_given)
        # print("confidence level is " + str(confidence_level))
        # print()
        strData="confidence level is: " + str(confidence_level)
        QuestionDigonosis.objRef.txtDigonosis.insert(END,str(strData)+'\n')
        # print('The model suggests:')
        # print()
        strData='The model suggests:'
        QuestionDigonosis.objRef.txtDigonosis.insert(END,str(strData)+'\n')
        # Look up the doctor row matching the first predicted disease.
        row = doctors[doctors['disease'] == present_disease[0]]
        # print('Consult ', str(row['name'].values))
        # print()
        strData='Consult '+ str(row['name'].values)
        QuestionDigonosis.objRef.txtDigonosis.insert(END,str(strData)+'\n')
        # print('Visit ', str(row['link'].values))
        #print(present_disease[0])
        # Insert the doctor's URL as a clickable hyperlink in the widget.
        hyperlink = HyperlinkManager(QuestionDigonosis.objRef.txtDigonosis)
        strData='Visit '+ str(row['link'].values[0])
        def click1():
            webbrowser.open_new(str(row['link'].values[0]))
        QuestionDigonosis.objRef.txtDigonosis.insert(INSERT, strData, hyperlink.add(click1))
        #QuestionDigonosis.objRef.txtDigonosis.insert(END,str(strData)+'\n')
        yield strData
def tree_to_code(tree, feature_names):
    """Prime the module globals used by recurse().

    Stores the fitted tree's internal structure in tree_, the feature name
    for every node (or "undefined!" for leaves) in feature_name, and resets
    the symptoms_present accumulator.
    """
    global tree_, feature_name, symptoms_present
    tree_ = tree.tree_
    feature_name = []
    for feature_index in tree_.feature:
        if feature_index != _tree.TREE_UNDEFINED:
            feature_name.append(feature_names[feature_index])
        else:
            feature_name.append("undefined!")
    symptoms_present = []
def execute_bot():
    """Prepare a diagnosis session by priming the decision-tree globals."""
    # print("Please reply with yes/Yes or no/No for the following symptoms")
    tree_to_code(classifier, cols)
# This section of code to be run after scraping the data
doc_dataset = pd.read_csv('doctors_dataset.csv', names = ['Name', 'Description'])
diseases = dimensionality_reduction.index
diseases = pd.DataFrame(diseases)
doctors = pd.DataFrame()
doctors['name'] = np.nan
doctors['link'] = np.nan
doctors['disease'] = np.nan
doctors['disease'] = diseases['prognosis']
doctors['name'] = doc_dataset['Name']
doctors['link'] = doc_dataset['Description']
record = doctors[doctors['disease'] == 'AIDS']
record['name']
record['link']
# Execute the bot and see it in Action
#execute_bot()
class QuestionDigonosis(Frame):
    """Diagnosis screen: walks the decision tree by asking yes/no symptom
    questions and prints the resulting diagnosis.

    Class attributes:
        objIter: generator from recurse(); advanced on each Yes/No click.
        objRef:  the single live instance; recurse() writes output through it.
    """
    objIter = None
    objRef = None

    def __init__(self, master=None):
        master.title("Question")
        # root.iconbitmap("")
        # NOTE(review): "z" looks like a typo for "zoomed" -- confirm on
        # the target platform before changing.
        master.state("z")
        # master.minsize(700,350)
        QuestionDigonosis.objRef = self
        super().__init__(master=master)
        self["bg"] = "light blue"
        self.createWidget()
        self.iterObj = None

    def createWidget(self):
        """Lay out question/diagnosis text boxes and the four buttons."""
        self.lblQuestion = Label(self, text="Question", width=12, bg="bisque")
        self.lblQuestion.grid(row=0, column=0, rowspan=4)
        self.lblDigonosis = Label(self, text="Digonosis", width=12, bg="bisque")
        self.lblDigonosis.grid(row=4, column=0, sticky="n", pady=5)
        # self.varQuestion=StringVar()
        self.txtQuestion = Text(self, width=100, height=4)
        self.txtQuestion.grid(row=0, column=1, rowspan=4, columnspan=20)
        self.varDiagonosis = StringVar()
        self.txtDigonosis = Text(self, width=100, height=14)
        self.txtDigonosis.grid(row=4, column=1, columnspan=20, rowspan=20, pady=5)
        self.btnNo = Button(self, text="No", width=12, bg="bisque", command=self.btnNo_Click)
        self.btnNo.grid(row=25, column=0)
        self.btnYes = Button(self, text="Yes", width=12, bg="bisque", command=self.btnYes_Click)
        self.btnYes.grid(row=25, column=1, columnspan=20, sticky="e")
        self.btnClear = Button(self, text="Clear", width=12, bg="bisque", command=self.btnClear_Click)
        self.btnClear.grid(row=27, column=0)
        self.btnStart = Button(self, text="Start", width=12, bg="bisque", command=self.btnStart_Click)
        self.btnStart.grid(row=27, column=1, columnspan=20, sticky="e")

    def btnNo_Click(self):
        """Record a 'no' answer and show the next question."""
        # (The duplicated "global val,ans" statement was removed.)
        global val, ans
        ans = 'no'
        str1 = QuestionDigonosis.objIter.__next__()
        self.txtQuestion.delete(0.0, END)
        self.txtQuestion.insert(END, str1 + "\n")

    def btnYes_Click(self):
        """Record a 'yes' answer; the generator writes any diagnosis output."""
        global val, ans
        ans = 'yes'
        self.txtDigonosis.delete(0.0, END)
        # __next__() advances the tree walk; recurse() writes directly into
        # txtDigonosis when it reaches a leaf, so str1 is not re-inserted.
        str1 = QuestionDigonosis.objIter.__next__()
        # self.txtDigonosis.insert(END,str1+"\n")

    def btnClear_Click(self):
        """Clear both text boxes."""
        self.txtDigonosis.delete(0.0, END)
        self.txtQuestion.delete(0.0, END)

    def btnStart_Click(self):
        """Reset the tree walk and display the first question."""
        execute_bot()
        self.txtDigonosis.delete(0.0, END)
        self.txtQuestion.delete(0.0, END)
        self.txtDigonosis.insert(END, "Please Click on Yes or No for the Above symptoms in Question")
        QuestionDigonosis.objIter = recurse(0, 1)
        str1 = QuestionDigonosis.objIter.__next__()
        self.txtQuestion.insert(END, str1 + "\n")
class MainForm(Frame):
    """Landing screen offering Login and Register choices."""

    # Shared reference to the Tk root so click handlers can swap screens.
    main_Root = None

    def destroyPackWidget(self, parent):
        # Destroy every pack()-managed child so the next screen can draw.
        for e in parent.pack_slaves():
            e.destroy()

    def __init__(self, master=None):
        MainForm.main_Root = master
        super().__init__(master=master)
        master.geometry("540x564")
        master.title("Account Login")
        self.createWidget()

    def createWidget(self):
        # Title banner, the two action buttons, then team credit labels.
        self.lblMsg=Label(self, text="Health Care Chatbot", bg="IndianRed2", width="300", height="4", font=("Comic Sans MS", 24, "bold"))
        self.lblMsg.pack()
        self.btnLogin=Button(self, text="Login", height="2", bg="SeaGreen1", width="300", font=("Comic Sans MS", 18, "bold"), command = self.lblLogin_Click)
        self.btnLogin.pack()
        self.btnRegister=Button(self, text="Register", height="2", bg="salmon", width="300", font=("Comic Sans MS", 18, "bold"), command = self.btnRegister_Click)
        self.btnRegister.pack()
        self.lblTeam=Label(self, text="Presented by: Team ERROR404", bg="VioletRed3", width = "250", height = "2", font=("Comic Sans MS", 15, "italic"))
        self.lblTeam.pack()
        self.lblTeam1=Label(self, text="Divyansh Tripathi", bg="RosyBrown1", width = "250", height = "1", font=("Comic Sans MS", 14))
        self.lblTeam1.pack()
        # NOTE(review): lblTeam3 is assigned twice (here and below); only the
        # widget reference is overwritten, both labels still display.
        self.lblTeam3=Label(self, text="Bipul Gautam", bg="RosyBrown2", width = "250", height = "1", font=("Comic Sans MS", 14))
        self.lblTeam3.pack()
        self.lblTeam2=Label(self, text="Koneru Rehasree", bg="RosyBrown1", width = "250", height = "1", font=("Comic Sans MS", 14))
        self.lblTeam2.pack()
        self.lblTeam3=Label(self, text="Kodipelly Sai Ganesh", bg="RosyBrown2", width = "250", height = "1", font=("Comic Sans MS", 14))
        self.lblTeam3.pack()

    def lblLogin_Click(self):
        # Swap this screen for the Login form.
        self.destroyPackWidget(MainForm.main_Root)
        frmLogin=Login(MainForm.main_Root)
        frmLogin.pack()

    def btnRegister_Click(self):
        # Swap this screen for the SignUp form.
        self.destroyPackWidget(MainForm.main_Root)
        frmSignUp = SignUp(MainForm.main_Root)
        frmSignUp.pack()
class Login(Frame):
    """Login screen: verifies the entered credentials against the flat file
    created by SignUp.register_user (line 1 = username, line 2 = password)."""

    main_Root = None

    def destroyPackWidget(self, parent):
        # Destroy every pack()-managed child so the next screen can draw.
        for e in parent.pack_slaves():
            e.destroy()

    def __init__(self, master=None):
        Login.main_Root = master
        super().__init__(master=master)
        master.title("Login Form")
        master.geometry("540x564")
        self.createWidget()

    def createWidget(self):
        """Lay out the username/password entries and the Login button."""
        self.lblMsg=Label(self, text="Please enter the details below to login", bg="SpringGreen2", width="300", height="4", font=("Comic Sans MS", 16, "bold"))
        self.lblMsg.pack()
        self.username=Label(self, text="Username * ", height="2", font=("Times New Roman", 12))
        self.username.pack()
        self.username_verify = StringVar()
        self.username_login_entry = Entry(self, textvariable=self.username_verify)
        self.username_login_entry.pack()
        self.password=Label(self, text="Password * ", height="2", font=("Times New Roman", 12))
        self.password.pack()
        self.password_verify = StringVar()
        self.password_login_entry = Entry(self, textvariable=self.password_verify, show='*')
        self.password_login_entry.pack()
        self.btnLogin=Button(self, text="Login", width=12, height=1, command=self.btnLogin_Click)
        self.btnLogin.pack()

    def btnLogin_Click(self):
        """Validate credentials and open the diagnosis screen on success."""
        username1 = self.username_login_entry.get()
        password1 = self.password_login_entry.get()
        # messagebox.showinfo("Failure", self.username1+":"+password1)
        list_of_files = os.listdir()
        if username1 in list_of_files:
            # with-block closes the credentials file (it was leaked before).
            with open(username1, "r") as file1:
                verify = file1.read().splitlines()
            # The stored file holds [username, password]; compare against the
            # password line only.  The previous membership test
            # ("password1 in verify") also accepted the USERNAME as a valid
            # password.
            if len(verify) >= 2 and password1 == verify[1]:
                messagebox.showinfo("Sucess","Login Sucessful")
                self.destroyPackWidget(Login.main_Root)
                frmQuestion = QuestionDigonosis(Login.main_Root)
                frmQuestion.pack()
            else:
                messagebox.showinfo("Failure", "Login Details are wrong try again")
        else:
            messagebox.showinfo("Failure", "User not found try from another user\n or sign up for new user")
class SignUp(Frame):
    """Registration screen.

    Stores each account as a plain-text file named after the username
    (first line: username, second line: password) — the same layout that
    Login.btnLogin_Click reads back.
    """
    main_Root=None
    def destroyPackWidget(self,parent):
        # Remove every packed child so the next screen can reuse the window.
        for e in parent.pack_slaves():
            e.destroy()
    def __init__(self, master=None):
        SignUp.main_Root=master
        super().__init__(master=master)
        # The original called master.title("Register") twice (before and
        # after super().__init__()); once is sufficient.
        master.title("Register")
        master.geometry("540x564")
        self.createWidget()
    def createWidget(self):
        """Build and pack the registration form widgets."""
        self.lblMsg=Label(self, text="Please enter details below", bg="tomato", width="300", height="4", font=("Comic Sans MS", 16, "bold"))
        self.lblMsg.pack()
        self.username_lable = Label(self, text="Username * ", height="2", font=("Times New Roman", 12))
        self.username_lable.pack()
        self.username = StringVar()
        self.username_entry = Entry(self, textvariable=self.username)
        self.username_entry.pack()
        self.password_lable = Label(self, text="Password * ", height="2", font=("Times New Roman", 12))
        self.password_lable.pack()
        self.password = StringVar()
        self.password_entry = Entry(self, textvariable=self.password, show='*')
        self.password_entry.pack()
        self.btnRegister=Button(self, text="Register", width=16, height=1, bg="khaki2", command=self.register_user)
        self.btnRegister.pack()
    def register_user(self):
        """Persist the credentials and show the success screen."""
        # NOTE(review): credentials are stored in plain text on disk — fine
        # for a class project, not for real authentication.
        with open(self.username_entry.get(), "w") as file:
            file.write(self.username_entry.get() + "\n")
            file.write(self.password_entry.get())
        self.destroyPackWidget(SignUp.main_Root)
        # Use the class-level window reference instead of the module global
        # `root` (they are the same object; this keeps the class consistent).
        self.lblSucess=Label(SignUp.main_Root, text="Registration Success", fg="green", bg="SpringGreen1", width="300", height="4", font=("Comic Sans MS", 16, "bold"))
        self.lblSucess.pack()
        self.btnSucess=Button(SignUp.main_Root, text="Click Here to proceed", width="300", height="2", font=("Comic Sans MS", 12, "italic"), command=self.btnSucess_Click)
        self.btnSucess.pack()
    def btnSucess_Click(self):
        """Continue to the diagnosis questionnaire."""
        self.destroyPackWidget(SignUp.main_Root)
        frmQuestion = QuestionDigonosis(SignUp.main_Root)
        frmQuestion.pack()
# Application entry point: create the main window, show the landing form,
# and hand control to the Tk event loop.
root = Tk()
frmMainForm=MainForm(root)
frmMainForm.pack()
root.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
6799732e6efdadc03999d9a6551f7d070c25df4b | f5a002ae3b12a750fa3c777e8ce239677d0f8db5 | /accounts_mode_voucher/model_purchase_account_signal.py | a6dbc629f75dc259c7f63f80103a13fc3d8d5143 | [] | no_license | abhishek-ag2000/working123 | f217911c6f54091b6412f1bf7e594998cab5cbda | e9e07d98011a4db812d06e50de4063e305dbc1d9 | refs/heads/master | 2022-12-14T03:54:54.064379 | 2019-09-14T14:10:58 | 2019-09-14T14:10:58 | 188,194,278 | 0 | 0 | null | 2022-12-08T05:21:08 | 2019-05-23T08:37:53 | JavaScript | UTF-8 | Python | false | false | 6,078 | py | """
Signals for Purchase
"""
from django.db.models import Value, Sum
from django.db.models.signals import pre_save, post_save, pre_delete, post_delete
from django.db.models.functions import Coalesce
from django.dispatch import receiver
from accounting_entry.models import JournalVoucher
from accounting_entry.decorators import prevent_signal_call_on_bulk_load
from .model_purchase_accounts import PurchaseVoucherAccounts, PurchaseTermAccounts, PurchaseTaxAccounts
# @receiver(pre_save, sender=PurchaseVoucherAccounts)
# @prevent_signal_call_on_bulk_load
# def update_subtotal(sender, instance, *args, **kwargs):
# """
# Signal to calculate the sub total of every goods in a particular voucher
# """
# total_ledger = instance.purchase_voucher_term_accounts.aggregate(
# the_sum=Coalesce(Sum('total'), Value(0)))['the_sum']
# if total_ledger:
# instance.sub_total = total_ledger
# @receiver(pre_save, sender=PurchaseVoucherAccounts)
# @prevent_signal_call_on_bulk_load
# def update_totalgst_accounts(sender, instance, *args, **kwargs):
# """
# Signal to calculate the GST totals of a particular voucher
# """
# total_cgst_extra = instance.purchase_voucher_term_accounts.aggregate(
# the_sum=Coalesce(Sum('cgst_total'), Value(0)))['the_sum']
# total_sgst_extra = instance.purchase_voucher_term_accounts.aggregate(
# the_sum=Coalesce(Sum('sgst_total'), Value(0)))['the_sum']
# total_igst_extra = instance.purchase_voucher_term_accounts.aggregate(
# the_sum=Coalesce(Sum('igst_total'), Value(0)))['the_sum']
# if total_cgst_extra:
# instance.cgst_total = total_cgst_extra
# if total_sgst_extra:
# instance.sgst_total = total_sgst_extra
# if total_igst_extra:
# instance.igst_total = total_igst_extra
@receiver(pre_save, sender=PurchaseVoucherAccounts)
@prevent_signal_call_on_bulk_load
def update_total_tax_accounts(sender, instance, *args, **kwargs):
    """
    Signal to calculate the Tax totals of a particular voucher in case of Composite billing
    """
    # Sum the per-term tax totals; Coalesce turns an empty aggregate into 0.
    total_tax_extra = instance.purchase_voucher_term_accounts.aggregate(
        the_sum=Coalesce(Sum('tax_total'), Value(0)))['the_sum']
    # NOTE(review): a zero sum leaves the previous tax_total untouched rather
    # than resetting it — confirm this is intended.
    if total_tax_extra:
        instance.tax_total = total_tax_extra
# @receiver(pre_save, sender=PurchaseVoucherAccounts)
# @prevent_signal_call_on_bulk_load
# def update_purchase_grand_total(sender, instance, *args, **kwargs):
# """
# Signal to calculate the Grand Total of a particular voucher
# """
@receiver(post_save, sender=PurchaseTermAccounts)
@prevent_signal_call_on_bulk_load
def user_created_purchase_accounts_journal_accounts(sender, instance, created, **kwargs):
    """
    Signals to create a journal entry for the additional charges Ledger in a particular entry
    """
    # NOTE(review): count()+1 as a running counter is race-prone under
    # concurrent saves — confirm whether duplicates matter here.
    c = JournalVoucher.objects.filter(
        user=instance.purchase_voucher.user, company=instance.purchase_voucher.company).count() + 1
    if instance.total != 0:
        # Keyed on (voucher_id, voucher_type) so re-saving a term updates its
        # existing journal row instead of creating a second one.
        JournalVoucher.objects.update_or_create(
            voucher_id=instance.id,
            voucher_type="Charges_against_Purchase",
            defaults={
                'counter': c,
                'user': instance.purchase_voucher.user,
                'company': instance.purchase_voucher.company,
                'voucher_date': instance.purchase_voucher.voucher_date,
                'cr_ledger': instance.purchase_voucher.party_ac,
                'dr_ledger': instance.ledger,
                'amount': instance.total}
        )
@receiver(post_save, sender=PurchaseTaxAccounts)
@prevent_signal_call_on_bulk_load
def user_created_purchase_gst_charge_accounts(sender, instance, created, **kwargs):
    """
    Signal to create a jounal entry for the GST ledgers in a particular voucher
    """
    # NOTE(review): count()+1 as a running counter is race-prone under
    # concurrent saves; also this guards on `instance.ledger` while the
    # charges signal above guards on `instance.total != 0` — confirm the
    # asymmetry is intentional.
    c = JournalVoucher.objects.filter(
        user=instance.purchase_voucher.user, company=instance.purchase_voucher.company).count() + 1
    if instance.ledger:
        JournalVoucher.objects.update_or_create(
            voucher_id=instance.id,
            voucher_type="Tax_against_Purchase",
            defaults={
                'counter': c,
                'user': instance.purchase_voucher.user,
                'company': instance.purchase_voucher.company,
                'voucher_date': instance.purchase_voucher.voucher_date,
                'cr_ledger': instance.purchase_voucher.party_ac,
                'dr_ledger': instance.ledger,
                'amount': instance.total}
        )
@receiver(pre_delete, sender=PurchaseVoucherAccounts)
def delete_journal_voucher_against_terms_purchases_accounts(sender, instance, **kwargs):
    """
    Signal to delete a journal entry whenever a additional ledger is deleted from a voucher
    """
    purchase_voucher_term = PurchaseTermAccounts.objects.filter(purchase_voucher=instance)
    for s in purchase_voucher_term:
        # save() re-fires the term's own signals before its journal rows are
        # removed — presumably to refresh dependent ledger balances; confirm.
        s.save()
        JournalVoucher.objects.filter(
            company=s.purchase_voucher.company, voucher_id=s.id).delete()
@receiver(pre_delete, sender=PurchaseVoucherAccounts)
def delete_journal_voucher_against_tax_purchases_accounts(sender, instance, **kwargs):
    """
    Signal to delete a journal entry whenever a GST ledger is removed from a particular voucher
    """
    purchase_voucher_tax = PurchaseTaxAccounts.objects.filter(
        purchase_voucher=instance)
    for s in purchase_voucher_tax:
        # Re-save first (fires the tax row's own signals), then drop its
        # journal entries by (company, voucher_id).
        s.save()
        JournalVoucher.objects.filter(
            company=s.purchase_voucher.company, voucher_id=s.id).delete()
@receiver(pre_delete, sender=PurchaseVoucherAccounts)
def delete_related_journal_accounts(sender, instance, **kwargs):
    """
    Signal to delete a jounal entry whenever a purchase entry is deleted
    """
    # Removes journal rows keyed by the voucher's own id (as opposed to the
    # term/tax rows handled by the two signals above).
    JournalVoucher.objects.filter(
        user=instance.user, company=instance.company, voucher_id=instance.id).delete()
@receiver(pre_delete, sender=PurchaseVoucherAccounts)
def delete_related_party_ledger_purchase_accounts(sender, instance, **kwargs):
    """
    Signal fired before a purchase voucher is deleted: re-save the party
    ledger and its group, presumably so cached balances are recomputed —
    TODO confirm against the ledger model's save() logic.
    """
    instance.party_ac.save()
    instance.party_ac.ledger_group.save()
| [
"ratanbiswas.it@gmail.com"
] | ratanbiswas.it@gmail.com |
7becadaa213cd03cf79e3e0b8cb057367af3bfac | 117078195d04c6b4a8919cb3d426f7434081c222 | /polarmine/alternative/alternative_solver.py | 6ca0dda71ea0acc77c69a5684bb42605ae60e574 | [
"MIT"
] | permissive | morpheusthewhite/master-thesis | 6825e15dd7a958306ebbff5b5037cfdeaa119c5e | 2ab4c0509a119d7b5f332b842a4101470a884351 | refs/heads/master | 2023-07-11T07:11:20.353983 | 2021-08-10T13:01:42 | 2021-08-10T13:01:42 | 330,761,354 | 1 | 0 | MIT | 2021-03-16T15:57:16 | 2021-01-18T19:03:50 | Python | UTF-8 | Python | false | false | 453 | py | from typing import List
from abc import ABC, abstractmethod
from polarmine.graph import InteractionGraph
class AlternativeSolver(ABC):
    """A solver for an alternative formulation of Echo Chamber Problem"""
    def __init__(self, *args, **kwargs):
        super(AlternativeSolver, self).__init__(*args, **kwargs)
    @abstractmethod
    def solve(
        self, graph: InteractionGraph, alpha: float
    ) -> tuple[float, List[int]]:
        """Solve the problem on `graph` with threshold/weight `alpha`.

        Returns a (score, assignment) pair — presumably an objective value
        and one integer label per vertex; confirm against implementations.
        """
        pass
| [
"zffromGerace@hotmail.it"
] | zffromGerace@hotmail.it |
d2f6e5faa8e1f124af00e0502dca3ad30670785e | b5fabc6c6de064690f8d4ee423001cf9365a3d9f | /flash/image/segmentation/model.py | 9296db60cbcff1e6220d5aee051ddb36549a8b1f | [
"Apache-2.0"
] | permissive | dmarx/lightning-flash | 021dfd76bde6e30309f14feb5853020b0babe90d | 4cda031c1f9c8d8754fd36b5720d2a5a7d866765 | refs/heads/master | 2023-09-06T06:24:29.856354 | 2021-11-24T23:38:14 | 2021-11-24T23:38:14 | 422,352,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,182 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Union
import torch
from torch import nn
from torch.nn import functional as F
from torchmetrics import IoU
from flash.core.classification import ClassificationTask
from flash.core.data.io.input import DataKeys
from flash.core.data.io.output_transform import OutputTransform
from flash.core.registry import FlashRegistry
from flash.core.utilities.imports import _KORNIA_AVAILABLE
from flash.core.utilities.isinstance import _isinstance
from flash.core.utilities.types import (
LOSS_FN_TYPE,
LR_SCHEDULER_TYPE,
METRICS_TYPE,
OPTIMIZER_TYPE,
OUTPUT_TRANSFORM_TYPE,
OUTPUT_TYPE,
)
from flash.image.segmentation.backbones import SEMANTIC_SEGMENTATION_BACKBONES
from flash.image.segmentation.heads import SEMANTIC_SEGMENTATION_HEADS
from flash.image.segmentation.output import SegmentationLabels
if _KORNIA_AVAILABLE:
import kornia as K
class SemanticSegmentationOutputTransform(OutputTransform):
    """Resizes predictions (and inputs) back to each sample's original size."""
    def per_sample_transform(self, sample: Any) -> Any:
        # METADATA["size"][-2:] holds the original (height, width) recorded
        # when the sample was loaded; undo the model-input resize.
        resize = K.geometry.Resize(sample[DataKeys.METADATA]["size"][-2:], interpolation="bilinear")
        sample[DataKeys.PREDS] = resize(sample[DataKeys.PREDS])
        sample[DataKeys.INPUT] = resize(sample[DataKeys.INPUT])
        return super().per_sample_transform(sample)
class SemanticSegmentation(ClassificationTask):
    """``SemanticSegmentation`` is a :class:`~flash.Task` for semantic segmentation of images. For more details, see
    :ref:`semantic_segmentation`.
    Args:
        num_classes: Number of classes to classify.
        backbone: A string or model to use to compute image features.
        backbone_kwargs: Additional arguments for the backbone configuration.
        head: A string or (model, num_features) tuple to use to compute image features.
        head_kwargs: Additional arguments for the head configuration.
        pretrained: Use a pretrained backbone.
        loss_fn: Loss function for training.
        optimizer: Optimizer to use for training.
        lr_scheduler: The LR scheduler to use during training.
        metrics: Metrics to compute for training and evaluation. Can either be an metric from the `torchmetrics`
            package, a custom metric inherenting from `torchmetrics.Metric`, a callable function or a list/dict
            containing a combination of the aforementioned. In all cases, each metric needs to have the signature
            `metric(preds,target)` and return a single scalar tensor. Defaults to :class:`torchmetrics.IOU`.
        learning_rate: Learning rate to use for training.
        multi_label: Whether the targets are multi-label or not.
        output: The :class:`~flash.core.data.io.output.Output` to use when formatting prediction outputs.
        output_transform: :class:`~flash.core.data.io.output_transform.OutputTransform` use for post processing samples.
    """
    output_transform_cls = SemanticSegmentationOutputTransform
    # Registries of constructors keyed by name; resolved in __init__.
    backbones: FlashRegistry = SEMANTIC_SEGMENTATION_BACKBONES
    heads: FlashRegistry = SEMANTIC_SEGMENTATION_HEADS
    required_extras: str = "image"
    def __init__(
        self,
        num_classes: int,
        backbone: Union[str, nn.Module] = "resnet50",
        backbone_kwargs: Optional[Dict] = None,
        head: str = "fpn",
        head_kwargs: Optional[Dict] = None,
        pretrained: Union[bool, str] = True,
        loss_fn: LOSS_FN_TYPE = None,
        optimizer: OPTIMIZER_TYPE = "Adam",
        lr_scheduler: LR_SCHEDULER_TYPE = None,
        metrics: METRICS_TYPE = None,
        learning_rate: float = 1e-3,
        multi_label: bool = False,
        output: OUTPUT_TYPE = None,
        output_transform: OUTPUT_TRANSFORM_TYPE = None,
    ) -> None:
        if metrics is None:
            metrics = IoU(num_classes=num_classes)
        if loss_fn is None:
            loss_fn = F.cross_entropy
        # TODO: need to check for multi_label
        if multi_label:
            raise NotImplementedError("Multi-label not supported yet.")
        super().__init__(
            model=None,
            loss_fn=loss_fn,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            metrics=metrics,
            learning_rate=learning_rate,
            output=output or SegmentationLabels(),
            output_transform=output_transform or self.output_transform_cls(),
        )
        self.save_hyperparameters()
        if not backbone_kwargs:
            backbone_kwargs = {}
        if not head_kwargs:
            head_kwargs = {}
        if isinstance(backbone, nn.Module):
            self.backbone = backbone
        else:
            self.backbone = self.backbones.get(backbone)(**backbone_kwargs)
        self.head: nn.Module = self.heads.get(head)(
            backbone=self.backbone, num_classes=num_classes, pretrained=pretrained, **head_kwargs
        )
        # Re-point `backbone` at the head's encoder: the head may wrap or
        # replace the encoder it was given, and this exposes the weights
        # actually used — presumably for checkpointing/finetuning; confirm.
        self.backbone = self.head.encoder
    def training_step(self, batch: Any, batch_idx: int) -> Any:
        # Unpack the dict-style batch into the (input, target) tuple the
        # base ClassificationTask expects.
        batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
        return super().training_step(batch, batch_idx)
    def validation_step(self, batch: Any, batch_idx: int) -> Any:
        batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
        return super().validation_step(batch, batch_idx)
    def test_step(self, batch: Any, batch_idx: int) -> Any:
        batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
        return super().test_step(batch, batch_idx)
    def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
        # Keep the whole batch dict and attach predictions under PREDS so the
        # output transform can resize them using the stored metadata.
        batch_input = batch[DataKeys.INPUT]
        batch[DataKeys.PREDS] = super().predict_step(batch_input, batch_idx, dataloader_idx=dataloader_idx)
        return batch
    def forward(self, x) -> torch.Tensor:
        res = self.head(x)
        # some frameworks like torchvision return a dict.
        # In particular, torchvision segmentation models return the output logits
        # in the key `out`.
        if _isinstance(res, Dict[str, torch.Tensor]):
            res = res["out"]
        return res
    @classmethod
    def available_pretrained_weights(cls, backbone: str):
        """Return the list of pretrained-weight names registered for `backbone`, or None."""
        result = cls.backbones.get(backbone, with_metadata=True)
        pretrained_weights = None
        if "weights_paths" in result["metadata"]:
            pretrained_weights = list(result["metadata"]["weights_paths"])
        return pretrained_weights
    @staticmethod
    def _ci_benchmark_fn(history: List[Dict[str, Any]]):
        """This function is used only for debugging usage with CI."""
        assert history[-1]["val_iou"] > 0.2
| [
"noreply@github.com"
] | noreply@github.com |
ed470021c5d7320465b10f40070de60988c328b6 | fd987458b3b294a7e757aaf1d80c5a9a6a3da53f | /DataModeling/backlog/FFMap.py | 27b1ff41ab5df28777338c9baae8055c74af531a | [
"MIT",
"Apache-2.0"
] | permissive | achyun/OpenVisuMap | 77d487e4f434b30377dbba695870c13f8022ff52 | d4f19b4c6ad12e25b407966571f830eb28e0b610 | refs/heads/master | 2023-09-04T07:30:12.421170 | 2021-11-01T22:48:34 | 2021-11-01T22:48:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,090 | py | #=========================================================================
# FFRegression.py
#=========================================================================
import sys, os, math, time, shutil, ModelLog
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
import tensorflow as tf
import numpy as np
from Util import MakePairs
#=========================================================================
# Command line: modelName epochs logLevel refreshFreq
modelName = sys.argv[1]
epochs = int(sys.argv[2])
logLevel = int(sys.argv[3])
refreshFreq = int(sys.argv[4])
# Hyper-parameters.
keep_prob_p=0.95
batch_sz = 25
learning_rate = 0.001
# One sample per row, '|'-separated columns.
X = np.genfromtxt('inData.csv', delimiter='|', dtype=np.float32)
Y = np.genfromtxt('outData.csv', delimiter='|', dtype=np.float32)
x_dim = np.shape(X)[1]
y_dim = np.shape(Y)[1]
N = np.shape(X)[0]
#=========================================================================
# Placeholders are twice the data width because samples are concatenated
# into pairs later via MakePairs — TODO confirm against Util.MakePairs.
var_x = tf.placeholder( tf.float32, shape=[None, 2*x_dim] )
var_y = tf.placeholder( tf.float32, shape=[None, 2*y_dim] )
keep_prob = tf.placeholder( tf.float32 )
if y_dim == 3:
    dims = [2*x_dim, 100, 80, 50, 20, 2*y_dim]
else:
    dims = [2*x_dim, 50, 30, 20, 2*y_dim]
# Fully connected sigmoid layers: weights W, biases b, activations H.
W, b, H = [], [], [var_x]
len_dims = len(dims)
for k in range(1, len_dims):
    W.append(tf.Variable( tf.random_normal([dims[k-1], dims[k]], 0.0, 0.5) ))
    b.append(tf.Variable( tf.zeros([dims[k]]) ))
    h = tf.nn.sigmoid(tf.matmul(H[k-1], W[k-1]) + b[k-1])
    if k == 1 and keep_prob_p != 1.0: # add dropout to the first hidden layer
        h = tf.nn.dropout(h, keep_prob)
    H.append(h)
y = H[-1]
# Expose the I/O tensors so the saved model can be reloaded by name.
tf.add_to_collection('vars', var_x)
tf.add_to_collection('vars', y)
tf.add_to_collection('vars', keep_prob)
#for lay in range(1, len_dims): tf.add_to_collection('vars', H[lay])
# Sum-of-squares regression loss.
cost = tf.reduce_sum(tf.square(var_y - y))
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
log = ModelLog.Logger(sess, 8888)
#=========================================================================
def CreatePrediction(input):
    """Run the network on `input` and dump predictions to predData.csv.

    NOTE(review): the parameter shadows the builtin `input`.
    """
    output = sess.run(y, feed_dict={var_x: input, keep_prob:1.0})
    np.savetxt("predData.csv", output, delimiter='|', fmt='%.5f')
def DiffL2(input, output):
    """Return sqrt of the summed squared error between the net's prediction
    for `input` and the expected `output` (dropout disabled)."""
    # The `y` fetch is discarded; only the cost value is used.
    _, err = sess.run([y, cost], feed_dict={var_x:input, var_y:output, keep_prob:1.0})
    err = math.sqrt(err)
    return err
from copy import deepcopy
def DeepValidate():
    """Report the recovery loss and the average loss under input noise.

    Perturbs each input with uniform noise scaled by the per-feature std
    and averages the L2 difference over K trials (fixed seed for
    reproducibility).
    """
    np.random.seed(123)
    diffL2 = 0
    K = 10
    stdX = np.std(X, axis=0)
    for k in range(K):
        XX = deepcopy(X)
        # NOTE(review): by the time this runs, X has been widened to
        # 2*x_dim columns by MakePairs, yet only the first x_dim columns
        # are perturbed here — confirm whether that is intended.
        for i in range(N):
            for j in range(x_dim):
                XX[i][j] += np.random.uniform(-0.5, +0.5) * stdX[j]
        diffL2 += DiffL2(XX, Y)
    msg = "Recovery Loss: " + '{0:.5f}'.format(DiffL2(X,Y)) + " Av. Diff-L2: " + '{0:.5f}'.format(diffL2/K) + " NN: " + str(dims)
    print(msg)
    log.ReportMsg(msg)
def LogReport(ep, error):
    """Report the RMS of the smoothed cost for epoch `ep` and refresh the map view."""
    error2 = math.sqrt(error)
    log.ReportCost(ep, error2)
    if logLevel >= 2:
        print(ep, ": ", error2)
    # Regenerate predData.csv so the external viewer shows current output.
    CreatePrediction(X)
    log.RefreshMap()
#=========================================================================
logTable = []
# Exponential moving average of the batch cost: eP weights the newest batch.
error = 0.0;
eP = float(batch_sz)/N;
eQ = 1 - eP;
# Pair the samples (doubling feature width) and shuffle X/Y with the same order.
X = MakePairs(X, True)
Y = MakePairs(Y, True)
rOrder = np.random.rand(N).argsort()
rX = np.take(X, rOrder, axis=0)
rY = np.take(Y, rOrder, axis=0)
for ep in range(1, epochs+1):
    for i in range(0, N, batch_sz):
        _, err = sess.run([train_step, cost], feed_dict={
            var_x:rX[i:i+batch_sz],
            var_y:rY[i:i+batch_sz],
            keep_prob:keep_prob_p})
        error = eQ*error + eP*err;
    if ep % refreshFreq == 0: LogReport(ep, error)
    #logTable.append(log.VarStates([b[0], b[1]]))
# Make sure the final epoch is reported even if it misses the cadence.
if ep % refreshFreq != 0: LogReport(ep, error)
if len(logTable) > 0: log.ShowMatrix(np.array(logTable).transpose(), 'log.bin')
#log.ShowMatrix(sess.run(H[1], feed_dict={var_x:X, keep_prob:1.0}), 'log1.bin')
#=========================================================================
# Persist the trained graph next to the script (Windows-style path).
saver = tf.train.Saver()
saver.save(sess, '.\\' + modelName, None, modelName+'.chk')
log.Completed()
if logLevel>=3: DeepValidate()
| [
"jamesxli@visumap.com"
] | jamesxli@visumap.com |
b79d517c91a75383143e6245ca26f5d5dc4c4e01 | 33ba0c08d8302eeede1cb3c8d9a3797a1a572ac5 | /assignments/11-regex-dates/dates.py | e79758f77afb8ecd1792fdf0a34eca7954beb8b9 | [
"MIT"
] | permissive | mattmiller899/biosys-analytics | a39db0f793b317c5de6c3a02b905536fdff66a71 | ab24a4c7206ed9a865e896daa57cee3c4e62df1f | refs/heads/master | 2022-08-18T08:52:50.827883 | 2021-05-26T21:07:42 | 2021-05-26T21:07:42 | 166,158,190 | 0 | 0 | MIT | 2019-01-17T04:07:06 | 2019-01-17T04:07:05 | null | UTF-8 | Python | false | false | 1,951 | py | #!/usr/bin/env python3
"""
Author : mattmiller899
Date : 2019-04-08
Purpose: Rock the Casbah
"""
import os
import sys
import re
# --------------------------------------------------
def main():
    """Normalise a date-like command-line argument to YYYY-MM-DD."""
    args = sys.argv[1:]
    if len(args) != 1:
        print('Usage: {} DATE'.format(os.path.basename(sys.argv[0])))
        sys.exit(1)
    arg = args[0]
    # comp1: YYYY[-]M?M?[-]D?D? — `{,2}` means 0-2 digits, so month/day may
    # be absent entirely (handled below).
    comp1 = re.compile("(?P<year>\d{4})[-]?(?P<month>\d{,2})[-]?(?P<day>\d{,2})")
    # comp2: M/Y with 1-2 digit year, assumed to be 20YY.
    comp2 = re.compile("(?P<month>\d{1,2})/(?P<year>\d{1,2})")
    # comp3: "<MonthName>-YYYY" or "<MonthName>, YYYY".
    comp3 = re.compile("(?P<month>\w+)[-,][\s]?(?P<year>\d{4})")
    m1 = re.match(comp1, arg)
    m2 = re.match(comp2, arg)
    m3 = re.match(comp3, arg)
    if m1:
        year = m1.group('year')
        month = m1.group('month')
        day = m1.group('day')
        # A bare 4-digit year leaves `month` empty: treated as no match.
        if month == "":
            print("No match")
            exit()
        # Missing day defaults to the first of the month.
        if len(day) == 0:
            day = "01"
        print("{}-{:02d}-{:02d}".format(year, int(month), int(day)))
        exit()
    if m2:
        year = m2.group('year')
        month = m2.group('month')
        print('20{:02d}-{:02d}-01'.format(int(year), int(month)))
        exit()
    if m3:
        year = m3.group("year")
        str_month = m3.group("month")
        short_months = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
        long_months = ('January February March April May June July August '
                       'September October November December').split()
        # Map a month name to its 1-based number via reversed enumerate.
        if str_month in short_months:
            d = dict(map(reversed, enumerate(short_months, 1)))
            month = d[str_month]
            print('{}-{:02d}-01'.format(year, month))
            exit()
        elif str_month in long_months:
            d = dict(map(reversed, enumerate(long_months, 1)))
            month = d[str_month]
            print('{}-{:02d}-01'.format(year, month))
            exit()
    print("No match")
# --------------------------------------------------
main()
| [
"mattmiller899@login3.cm.cluster"
] | mattmiller899@login3.cm.cluster |
deb150440060e0d6c968a2ccf2812970012b495a | 27c4e774f053594473da202c1c45dcbf237465be | /Scorm.py | 566403fdd0e407806057a4daa6e2727586ed572a | [] | no_license | Gamboua/zope-migration | 34e6b27962859352fe08a4277a8215b36b01889c | 7a83ed67c5ea561bfa8aa300728390b7220f3633 | refs/heads/master | 2020-12-25T14:49:22.173420 | 2017-10-19T20:47:50 | 2017-10-19T20:47:50 | 67,830,154 | 0 | 1 | null | 2016-10-20T21:42:09 | 2016-09-09T20:20:57 | PHP | UTF-8 | Python | false | false | 1,910 | py | import paramiko
import os
from scp import SCPClient
from config import *
from Command import Command
import random, string
class Scorm:
    """Fetches a SCORM course folder from a remote host, zips it, and
    registers it as a 'scorm' activity through the Command helper."""
    def __init__(self, scorm, course):
        """`scorm` is a dict that may carry 'folder' and 'title';
        `course` must expose an `id` used when creating the activity."""
        self.course = course
        self.scp = None
        self.type = 'scorm'
        self.section = 0
        self.folder = self.get_if_exists('folder', scorm)
        self.title = self.get_if_exists('title', scorm)
    def get_if_exists(self, parameter, json):
        # dict.get already returns None for a missing key; the original
        # `json.get(parameter) if parameter in json else None` performed
        # the same lookup twice for an identical result.
        return json.get(parameter)
    def scorm_add(self):
        """Fetch the package, zip it, and create the scorm activity."""
        self.scorm_import_folder()
        zip_name = self.scorm_zip()
        Command.command_execute(Command.activity_create_command(
            options=self.get_scorm_options(zip_name), type=self.type, id=self.course.id
        ))
    def get_scorm_options(self, name):
        """Build the CLI option string for the activity-create command."""
        params = []
        if self.section is not None:
            params.append('--section %s' % self.section)
        if self.title:
            params.append('--name "%s"' % self.title)
        params.append('--filepath /tmp/%s.zip' % name)
        return ' '.join(params)
    def scorm_zip(self):
        """Zip the downloaded folder into /tmp/<random>.zip and return the
        random basename (without the .zip extension)."""
        name = ''.join(random.choice(string.ascii_letters) for x in range(8))
        os.chdir(self.folder)
        # NOTE(review): os.system with an interpolated name is shell-injection
        # prone if inputs ever come from an untrusted source.
        os.system('zip -r /tmp/%s *' % name)
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        return name
    def scorm_import_folder(self):
        """Copy the course folder from the remote SCORM server via SCP."""
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(REMOTE_SCORM_SERVER, REMOTE_SCORM_PORT, REMOTE_SCORM_USER)
        scp = SCPClient(client.get_transport())
        if not os.path.isdir('/opt/zope298/courses'):
            os.makedirs('/opt/zope298/courses')
        scp.get(
            self.folder,
            '/opt/zope298/courses/',
            recursive=True
        )
        scp.close()
"gabriel.bonfim@4linux.com.br"
] | gabriel.bonfim@4linux.com.br |
4a5b7c6844ca194b50ed70323648cba57b6e0b8d | c6e5bbafd810d23e0ee46d69026cba35339d1dbd | /accounts/managers.py | 42d3aae755fba1358031994ccd3a06d4ca8dcdd1 | [] | no_license | mfonism/django-inqueerstigate | 9c8b729848bf3df9fb9ec991ec47391b69ad7b66 | af5420bf8adf6aa89533cd1462d9eeed6e8c88db | refs/heads/main | 2023-05-26T12:59:55.774989 | 2021-06-07T11:46:48 | 2021-06-07T11:46:48 | 323,681,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager):
    """Manager for a custom user model that uses email as the identifier."""
    use_in_migrations = True
    def _create_user(self, email, password, **extra_fields):
        """Create and persist a user with the given email and password."""
        if not email:
            raise ValueError("The given email must be set")
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
        # set_password hashes the raw password (None yields an unusable one).
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, email, password=None, **extra_fields):
        """Create a regular (non-staff, non-superuser) account."""
        extra_fields.setdefault("is_staff", False)
        extra_fields.setdefault("is_superuser", False)
        return self._create_user(email, password, **extra_fields)
    def create_superuser(self, email, password, **extra_fields):
        """Create an account with both staff and superuser rights."""
        extra_fields.setdefault("is_staff", True)
        extra_fields.setdefault("is_superuser", True)
        # Guard against callers explicitly passing False for either flag.
        if extra_fields.get("is_staff") is not True:
            raise ValueError("Superuser must have is_staff=True.")
        if extra_fields.get("is_superuser") is not True:
            raise ValueError("Superuser must have is_superuser=True.")
        return self._create_user(email, password, **extra_fields)
| [
"mfonetimfon@gmail.com"
] | mfonetimfon@gmail.com |
8da0b36863fe5720eff6f961ca1136f07cd253f3 | 648e0c3bcd4b26d72087c9a785fea85dbd5dce59 | /chap05/chap05.py | ae3b450c6f4a0524790246da33fea6690a8e079f | [] | no_license | bryansis2010/machinelearninginaction | d31f6c10996155957593123297fcbd4c0d954a57 | f21a2af2c306d5c53d56d5a7bcd8a5f2f07fea1b | refs/heads/master | 2020-06-02T06:57:03.163294 | 2015-04-05T14:50:07 | 2015-04-05T14:50:07 | 31,070,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | #-------------------------------------------------------------------------------
# Name: chap05
#
# Author: bryansis2010
#
# Created: 05/04/2015
#-------------------------------------------------------------------------------
#imports relevant to the text
from numpy import *
import re
#my own imports to make things easier
import os
# Build the path to the data file portably: the original formatted
# "%s\%s", hard-coding the Windows separator (and relying on "\%" not
# being a recognised escape sequence).
resource_path = os.path.dirname(__file__)
file_name = os.path.join("resource", "testSet.txt")
testSet_txtfile = os.path.join(resource_path, file_name)
# Close the handle deterministically instead of leaking it.
with open(testSet_txtfile) as testSet_fh:
    testSet_text = testSet_fh.readlines()
dataMat = []; labelMat = []
for line in testSet_text:
    line_array = line.split("\t")
    # Prepend the constant 1.0 intercept term; first two columns are the
    # features, the last column is the integer class label.
    dataMat.append([1.0, float(line_array[0]), float(line_array[1])])
    labelMat.append(int(line_array[-1]))
#print(testSet_text) | [
"bryan.sis.2010@gmail.com"
] | bryan.sis.2010@gmail.com |
6976c5d4d8149dc07efe4fe71690283a0ffcb22c | d1e6b312d8d4e4601213ad44f1b2ce90563118c9 | /reviews/models.py | 4b958f226dcbcf60b13e4c7250d0dfbce647021d | [] | no_license | seabass189/owhatacookie | 8a1fb8b99d69b4b44d0ec981a0fe09c3de710ba2 | 96cb3d7c54d71c187f53cc37876fba171ea5bac9 | refs/heads/master | 2023-01-02T01:30:07.974163 | 2020-10-25T23:19:01 | 2020-10-25T23:19:01 | 294,293,474 | 0 | 0 | null | 2020-09-22T00:00:30 | 2020-09-10T03:33:00 | Python | UTF-8 | Python | false | false | 563 | py | from django.db import models
from orders.models import Order
# from customers.models import Customer
class Review(models.Model):
    """A star rating a customer leaves for one of their orders."""
    header = models.CharField(max_length=50)
    text = models.CharField(max_length=500, blank=True, null=True)
    stars = models.PositiveSmallIntegerField()
    # Lazy string reference: the `from customers.models import Customer`
    # import at the top of this file is commented out, so naming the class
    # directly raised NameError at import time.
    customer = models.ForeignKey('customers.Customer', on_delete=models.CASCADE)
    order = models.ForeignKey(Order, on_delete=models.CASCADE)
    def __str__(self):
        return (str(self.stars) + ' stars - Customer: '
        + str(self.customer) + ' Order: ' + str(self.order.id))
| [
"33647914+seabass189@users.noreply.github.com"
] | 33647914+seabass189@users.noreply.github.com |
db0454c4c301f4b509ebb198c08bac7e87c6a3bd | d19d16ddc922b0915aff982568c5c71ee58fb8b9 | /dataset/utils.py | f13a627e795ae92c6dca77770e719e98d0542e2e | [] | no_license | zhaoyuzhi/HSGAN | 036a6fec722d564f9b203f6032bf47039c1eadd4 | f974761ec4a65ef58283ae4ccba618b97e79c4bc | refs/heads/main | 2023-08-03T10:06:05.195187 | 2023-07-27T14:21:54 | 2023-07-27T14:21:54 | 337,642,689 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | import os
import numpy as np
# ----------------------------------------
# PATH processing
# ----------------------------------------
def check_path(path):
    """Create `path` (including parents) if it does not already exist."""
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(path)` guard.
    os.makedirs(path, exist_ok=True)
def get_files(path):
    """Walk `path` recursively and return the full path of every file."""
    found = []
    for folder, _dirs, names in os.walk(path):
        found.extend(os.path.join(folder, name) for name in names)
    return found
def get_jpgs(path):
    """Walk `path` and return the bare file names (no directory part).

    NOTE(review): despite the name, no extension filtering is performed —
    every file name under `path` is returned.
    """
    names = []
    for _folder, _dirs, files in os.walk(path):
        names += files
    return names
def get_mats(path):
    """Return the full path of every file under `path` whose name ends in
    'mat' (extensionless suffix check, as in the sibling helpers)."""
    return [
        os.path.join(folder, name)
        for folder, _dirs, files in os.walk(path)
        for name in files
        if name[-3:] == 'mat'
    ]
def get_mats_name(path):
    """Return the stem (text before the first '.') of every 'mat' file
    under `path`."""
    return [
        name.split('.')[0]
        for _folder, _dirs, files in os.walk(path)
        for name in files
        if name[-3:] == 'mat'
    ]
def get_bmps(path):
    """Return the full path of every file under `path` whose name ends in
    'bmp' (extensionless suffix check, as in the sibling helpers)."""
    return [
        os.path.join(folder, name)
        for folder, _dirs, files in os.walk(path)
        for name in files
        if name[-3:] == 'bmp'
    ]
def get_pairs_name(path):
    """Return the stem (text before the first '.') of every 'mat' file
    under `path`.

    NOTE(review): identical to get_mats_name; kept for callers that use
    this name.
    """
    return [
        name.split('.')[0]
        for _folder, _dirs, files in os.walk(path)
        for name in files
        if name[-3:] == 'mat'
    ]
# ----------------------------------------
# PATH processing
# ----------------------------------------
def text_readlines(filename):
    """Read a text file and return its lines without line endings.

    Returns [] if the file cannot be opened (keeps the original
    best-effort contract).
    """
    try:
        with open(filename, 'r') as file:
            # splitlines() removes the newline from every line without
            # chopping a real character off a final line that has no
            # trailing newline (the original `[:len-1]` slice did).
            return file.read().splitlines()
    except IOError:
        return []
def text_save(content, filename, mode = 'a'):
    """Write each element of `content` to `filename`, one per line.

    Appends by default; pass mode='w' to overwrite.
    """
    with open(filename, mode) as handle:
        for item in content:
            handle.write(str(item) + '\n')
def savetxt(name, loss_log):
    """Dump the numeric sequence `loss_log` to file `name` via numpy."""
    np.savetxt(name, np.array(loss_log))
| [
"noreply@github.com"
] | noreply@github.com |
88128fb4f2ef8cb1c5a3e6135fa51534bd1e2718 | 949a6268b7bbd2707ce21a4398a5eedd8c1509b4 | /hw_1016/title_state.py | e912a5361ecc183c04eeb83d0ffc10c62e92ba79 | [] | no_license | samang22/School-Project | a2bbeb75f39fcbbb2e8e5ae52bc0272178670e4d | 9736ff9f456fdaea4c2b69ecd5887dd2562c3ba9 | refs/heads/master | 2020-03-28T11:11:53.791271 | 2018-12-14T04:03:16 | 2018-12-14T04:03:16 | 148,187,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | from pico2d import *
import game_framework
import boys_state
def enter():
    """State entry hook: load the title-screen background image."""
    global bgImage
    bgImage = load_image('../res/title.png')
def exit():
    """State exit hook: release the background image.

    NOTE(review): shadows the builtin exit(); renaming would break the
    game-framework state protocol, so it is kept as-is.
    """
    global bgImage
    del bgImage
def draw():
    # Repaint the canvas with the title image centred at (400, 300).
    clear_canvas()
    bgImage.draw(400, 300)
    update_canvas()
def update():
    # Throttle the state loop (~30 ms per frame).
    delay(0.03)
def handle_events():
    """Poll input: window close / ESC quits, SPACE enters the game state."""
    events = get_events()
    for e in events:
        if e.type == SDL_QUIT:
            game_framework.quit()
        elif e.type == SDL_KEYDOWN:
            if e.key == SDLK_ESCAPE:
                game_framework.quit()
            # game_framework.pop_state()
            elif e.key == SDLK_SPACE:
                game_framework.push_state(boys_state)
def pause():
    # No pause behaviour for the title screen.
    pass
def resume():
    # Nothing to restore on resume.
    pass
if __name__ == '__main__':
    # Allow running this state standalone for manual testing.
    import sys
    current_module = sys.modules[__name__]
    open_canvas()
    game_framework.run(current_module)
    close_canvas()
"samang22@naver.com"
] | samang22@naver.com |
847e83de22c9dbcb04f87362a0d956c786584799 | caace044baf7a6f2b0bda65ae361eed06bddfc3c | /dailyQuestion/2020/2020-06/06-01/python/solution_items.py | 1f7df5de69c465f7a57e918ca5eee350c02c2603 | [
"Apache-2.0"
] | permissive | russellgao/algorithm | fd6126e89c40d7d351c53bbd5fde690c9be899ef | ad5e724d20a8492b8eba03fc0f24e4ff5964b3ea | refs/heads/master | 2023-03-28T03:00:02.370660 | 2021-03-28T10:56:38 | 2021-03-28T10:56:38 | 259,038,372 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py |
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node."""
    def __init__(self, x):
        self.val = x
        self.next = None


def sortList(head: ListNode) -> ListNode:
    """Sort a singly-linked list in ascending order with merge sort.

    Top-down variant: the list is cut at its midpoint with a slow/fast
    pointer pair, each half is sorted recursively, then the halves are
    merged.  Stable: on equal keys the node from the earlier half is
    taken first, matching the original bottom-up implementation.
    """
    if head is None or head.next is None:
        return head

    # Find the midpoint and sever the list into two independent halves.
    slow, fast = head, head.next
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
    right_head = slow.next
    slow.next = None

    left_sorted = sortList(head)
    right_sorted = sortList(right_head)

    # Merge the sorted halves behind a dummy head node.
    dummy = ListNode(0)
    tail = dummy
    while left_sorted is not None and right_sorted is not None:
        if left_sorted.val <= right_sorted.val:
            tail.next = left_sorted
            left_sorted = left_sorted.next
        else:
            tail.next = right_sorted
            right_sorted = right_sorted.next
        tail = tail.next
    tail.next = left_sorted if left_sorted is not None else right_sorted
    return dummy.next
if __name__ == "__main__" :
node = ListNode(4)
node.next = ListNode(2)
node.next.next = ListNode(1)
node.next.next.next = ListNode(3)
node.next.next.next.next = ListNode(5)
result = sortList(node)
while result :
print(result.val)
result = result.next
print()
| [
"gaoweizong@hd123.com"
] | gaoweizong@hd123.com |
df13faf30c4a370935e8475eda2e144e87e571bb | a51380a47c97a0a7b5b9b517ad412334a299d956 | /wordcount/views.py | d4229e37348d4a8d04622b77015c10706adbe074 | [] | no_license | buhyunk3/topproject | 8fdead719430c4f0f809ffcf3d338d7f0126010b | 6934319f2de2291d8efa1a2ed8be6c49c228f792 | refs/heads/master | 2020-04-17T09:55:01.641594 | 2019-01-18T22:12:42 | 2019-01-18T22:12:42 | 166,479,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | from django.shortcuts import render
# Create your views here.
def home(request):
    # Render the static landing page; no context data is needed.
    return render(request, 'home.html')
def about(request):
    # Render the static "about" page.
    return render(request, 'about.html')
def result(request):
    """Count whitespace-separated words in the submitted text and render
    the result page with the raw text, the total word count, and the
    per-word tallies."""
    submitted = request.GET['fulltext']
    tokens = submitted.split()
    counts = {}
    for token in tokens:
        counts[token] = counts.get(token, 0) + 1
    context = {'full': submitted, 'total' : len(tokens), 'dictionary' : counts.items()}
    return render(request, 'result.html', context)
| [
"buhyunk3@likelion.org"
] | buhyunk3@likelion.org |
bf2bb21fe32c046e31ac269a94e444f91dc0217b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03626/s873858203.py | 5c64e4b78d0537220084f2cc138a77120c711579 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | MOD = 1000000007
n = int(input())
s1 = input()
s2 = input()
if s1[0] == s2[0]:
ans = 3
i = 1
prev = 1
else:
ans = 6
i = 2
prev = 2
while i<n:
if s1[i] == s2[i]:
i += 1
if prev == 1:
ans *= 2
else:
prev = 1
else:
i += 2
if prev == 1:
ans *= 2
prev = 2
else:
ans *= 3
ans %= MOD
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c4255ced1b54803e492ceff98a8071bf9141f62f | 51c1304e01aa5b83b8a0f98c1c182f47a2cca090 | /scripts/sisi_angle_script.py | 2006860e20887c466e1011cbe62ba4f4987c0649 | [] | no_license | Schulmanlab/nanotube_image_analysis_scripts | 5e9283f90f217093d7aa4b9186cfa14d8b25a3cd | 078be9239ca19582ace6b2589f95ac6bdc94cc2c | refs/heads/master | 2020-03-19T21:45:20.439149 | 2018-06-11T18:30:52 | 2018-06-11T18:30:52 | 136,949,013 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,367 | py | import numpy as np
from skimage.transform import (hough_line, hough_line_peaks,
probabilistic_hough_line)
from skimage.feature import canny
from skimage import data
from skimage import io
from skimage.morphology import closing, disk
from skimage.morphology import skeletonize
from skimage.measure import label, regionprops
from skimage.filters import threshold_otsu, threshold_local, rank
import sys
import os
import matplotlib.pyplot as plt
import math
from matplotlib import cm
from matplotlib import path
from skimage import img_as_uint
from scipy import ndimage
from scipy.spatial import distance
from scipy import ndimage as ndi
from numpy import unravel_index
import Tkinter, tkFileDialog
from skimage.external import tifffile
#modifying the joining detection script to measure the angle of Sisi's nanotubes relative to the x-axis of her images
#constants
#constants
tube_width = 5.0
length_cutoff = 3.0
eccentricity_cutoff = 0.5
end_to_end_distance_cutoff = 10.0
def dotproduct(v1, v2):
    """Dot product of two equal-length numeric sequences."""
    return sum((a*b) for a, b in zip(v1, v2))
def length(v):
    """Euclidean norm of vector *v*."""
    return math.sqrt(dotproduct(v, v))
def angle(v1, v2):
    """Acute angle in radians (range [0, pi/2]) between *v1* and *v2*.

    The absolute value of the dot product folds obtuse angles onto their
    acute complement, as in the original.  Bug fix: the cosine is clamped
    to <= 1.0 so that floating-point round-off (e.g. parallel vectors
    whose ratio lands at 1.0000000000000002) cannot push math.acos
    outside its domain and raise ValueError.
    """
    cos_theta = abs(dotproduct(v1, v2)) / (length(v1) * length(v2))
    return math.acos(min(1.0, cos_theta))
def line_length(line):
    """Euclidean length of *line*, a pair of (x, y) endpoints."""
    start, end = line
    delta = np.array((start[0], start[1])) - np.array((end[0], end[1]))
    return np.linalg.norm(delta)
def make_endpoints_mask(filled_binary_image):
    """Return a binary mask marking the endpoint pixels of every
    skeletonised tube in *filled_binary_image*.

    The image is skeletonised, then convolved with a kernel whose centre
    weighs 10 and whose 8 neighbours weigh 1: a skeleton pixel with
    exactly one skeleton neighbour scores 10 + 1 = 11, which identifies
    it as a line ending.
    """
    # skeletonize() expects an integer binary image
    filled_binary_image = filled_binary_image.astype(int)
    skeleton = skeletonize(filled_binary_image)
    skeleton = skeleton.astype(int)
    # centre weight 10 + exactly one neighbour = 11 marks an endpoint
    kernel = np.uint8([[1, 1, 1], [1, 10, 1], [1, 1, 1]])
    # NOTE(review): cval=1 pads the border with ones, so skeleton pixels
    # touching the image edge gain extra neighbour counts -- confirm
    # this border handling is intended.
    convolved_skeleton = ndimage.convolve(skeleton, kernel, mode='constant', cval = 1)
    # keep only pixels that scored exactly 11 (the endpoint signature)
    endpoint_mask = np.zeros_like(convolved_skeleton)
    endpoint_mask[np.where(convolved_skeleton == 11)] = 1
    return endpoint_mask
def endpoints(region_coords, endpoint_mask):
    """Return the two true endpoints of the tube covering *region_coords*.

    Candidate endpoints are taken from *endpoint_mask* (built by
    make_endpoints_mask); the candidate pair separated by the greatest
    Euclidean distance is returned as a list of two (row, col) centroids.
    Returns None when fewer than two candidates overlap the region.
    """
    endpoints_labelled = label(endpoint_mask)
    potential_endpoints = []
    for endpoint in regionprops(endpoints_labelled):
        # keep candidates whose pixels overlap this tube's pixel list
        # NOTE(review): `in region_coords` is a linear scan per pixel
        # (O(n*m) overall); a set of coordinate tuples would be faster.
        if any(i in region_coords for i in endpoint.coords.tolist()):
            potential_endpoints.append(endpoint.centroid)
    # the true endpoints are the pair with maximal mutual separation
    if len(potential_endpoints) <= 1:
        return None
    pairwise_distances = distance.cdist(potential_endpoints, potential_endpoints, 'euclidean')
    indices_of_max_distance = unravel_index(pairwise_distances.argmax(), pairwise_distances.shape)
    endpoint1 = potential_endpoints[indices_of_max_distance[0]]
    endpoint2 = potential_endpoints[indices_of_max_distance[1]]
    endpoints = [endpoint1, endpoint2]
    return endpoints
def are_joined(endpoint1, endpoint2):
    """Return True when the two endpoints lie within the joining cutoff.

    Bug fix: the original wrote `distance = distance(endpoint1, endpoint2)`,
    which makes `distance` a local name and raises UnboundLocalError at the
    call (and would otherwise shadow the imported scipy.spatial.distance
    module).  The separation is now computed directly.
    """
    # NOTE(review): this 5.0-pixel cutoff is distinct from the module-level
    # end_to_end_distance_cutoff (10.0) -- confirm which is intended.
    cutoff = 5.0
    dx = endpoint1[0] - endpoint2[0]
    dy = endpoint1[1] - endpoint2[1]
    return math.sqrt(dx * dx + dy * dy) <= cutoff
def calc_distance(endpoint1, endpoint2):
    """Euclidean distance between two (row, col) endpoint coordinates."""
    delta_x = endpoint1[0] - endpoint2[0]
    delta_y = endpoint1[1] - endpoint2[1]
    return math.sqrt(delta_x * delta_x + delta_y * delta_y)
# Line finding using the Probabilistic Hough Transform
tube_lengths = []
tube_angles = []
i=0
#cy3_file_list = os.listdir('6_nt')
'''root = Tkinter.Tk()
root.withdraw()
file_paths = tkFileDialog.askopenfilenames()
cy3_file_list = list(file_paths)
'''
cy3_image_stack = tifffile.imread("0_2um_tube.tif")
for image in cy3_image_stack:
total_images = len(cy3_image_stack)
current_frame = i
print "processing frame " +str(i) + " of "+str(total_images)
cy3_file = cy3_file_list[i]
#print "cy3 filename is "+str(cy3_file)
image_unthresholded = io.imread(cy3_file)
#thresh = threshold_otsu(image_unthresholded)
#image = image_unthresholded>thresh
block_size = 15
#image = threshold_local(image_unthresholded, block_size, offset=10)
#image_647 = threshold_local(image_647_unthresholded, block_size, offset=10)
radius = 5
selem = disk(radius)
#thresholding both files (getting rid of this because it should not be necessary!)
#image = rank.otsu(image_unthresholded, selem)
#image_647 = rank.otsu(image_647_unthresholded, selem)
image = image_unthresholded
#perfoming edge detection and morphological filling
edges_open = canny(image, 2, 1, 50) #originally 2,1,25 last param can go up to 500 for improved performance, must lower for poorer images
#edges_open = canny(image, 2) #originally 2,1,25
selem = disk(3)#originally 5
edges = closing(edges_open, selem)
fill_tubes = ndi.binary_fill_holes(edges)
io.imsave(cy3_file+"fill_tubes.png", img_as_uint(fill_tubes), cmap=cm.gray)
cy3_endpoint_mask = make_endpoints_mask(fill_tubes)
#label image
label_image = label(fill_tubes)
print "detecting nanotube angles...."
print len(regionprops(label_image))
for region in regionprops(label_image):
if region.area/tube_width >= length_cutoff and region.eccentricity >= eccentricity_cutoff:
region_coords = region.coords.tolist()
region_endpoints = endpoints(region_coords, cy3_endpoint_mask)
if region_endpoints == None:
continue
endpoint_to_endpoint_vector = np.subtract(region_endpoints[0], region_endpoints[1])
x_axis_vector = np.array([0, 1])
angle_with_x_axis = angle(endpoint_to_endpoint_vector, x_axis_vector)
angle_with_x_axis *= 180.0/math.pi
print 'angle with x axis is: ', angle_with_x_axis
tube_angles.append(angle_with_x_axis)
i+=1
print "printing angles"
f1=open('angles.dat','w+')
for angle in tube_angles:
print >>f1, angle
f1.close()
| [
"mpacella88@gmail.com"
] | mpacella88@gmail.com |
7620f89c22df19a666234828f74b8c1e662b8d0f | fd0e3562df4c4aadac3717de5cabef499e209553 | /battlefield.py | c22aeb83b800f249a39347fd5c0e15fc42bb9e18 | [] | no_license | ImHucklle/Robots_vs_Dinos | 9833a42bab08b9ae5e8296ac15093fb33d9c831f | 3e50f98b6e7cc092d6711174765f4283ec95c795 | refs/heads/main | 2023-09-06T06:43:55.252832 | 2021-11-18T20:45:21 | 2021-11-18T20:45:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,258 | py | from fleet import Fleet
from herd import Herd
class Battlefield:
    """Runs a turn-based battle between a Fleet of robots and a Herd of
    dinosaurs, prompting the player for attacker/defender choices until
    one side has no combatants left."""

    def __init__(self):
        self.fleet = Fleet()
        self.herd = Herd()

    def display_welcome(self):
        # Greeting printed once at startup.
        print ("Hello, welcome to the carnage!")

    def game_start(self):
        # Ask for confirmation; anything other than "y" skips the banner
        # but the battle still proceeds (matches original behaviour).
        user = input("Ready to fight for survival? Enter y or n.")
        if(user == "y"):
            print("**Game Starts**")

    def battle(self):
        """Alternate dinosaur and robot turns until one side is wiped out.

        Bug fixes: the original `elif` reused the exact condition of the
        `if`, so the robots never got a turn, and the winner announcement
        was invoked as a misspelled method on the dinosaur *list*
        (`self.herd.dinosaurs.display_winnners()`), which would raise
        AttributeError.
        """
        dino_attacks = True
        while len(self.herd.dinosaurs) > 0 and len(self.fleet.robots) > 0:
            if dino_attacks:
                self.dino_turn()
            else:
                self.robo_turn()
            dino_attacks = not dino_attacks
        self.display_winners()

    def dino_turn(self):
        # One dinosaur attack: pick the attacker, then the defender,
        # then resolve damage and remove fainted robots.
        print("Choose your dinosaur to attack:")
        self.show_dino_opponent_options()
        dino_champion = int(input())
        if len(self.fleet.robots) > 0:
            print("Choose the robot that'll defend:")
            self.show_robo_opponent_options()
            robot_champion = int(input())
            self.herd.dinosaurs[dino_champion].attack(self.fleet.robots[robot_champion])
            if self.fleet.robots[robot_champion].health <= 0:
                print(f"{self.fleet.robots[robot_champion].name} has fainted")
                self.fleet.robots.remove(self.fleet.robots[robot_champion])
        else:
            # No robots left to defend (fixed misspelled display_winnners).
            self.display_winners()

    def robo_turn(self):
        # Mirror of dino_turn with the roles reversed.
        print("Choose the robot who will attack:")
        self.show_robo_opponent_options()
        robot_champion = int(input())
        if len(self.herd.dinosaurs) > 0:
            print("Choose the dinosaur who will defend:")
            self.show_dino_opponent_options()
            dino_champion = int(input())
            self.fleet.robots[robot_champion].attack(self.herd.dinosaurs[dino_champion])
            if self.herd.dinosaurs[dino_champion].health <= 0:
                print(f"{self.herd.dinosaurs[dino_champion].name} has fainted")
                self.herd.dinosaurs.remove(self.herd.dinosaurs[dino_champion])
        else:
            self.display_winners()

    def show_dino_opponent_options(self):
        # Menu of the surviving dinosaurs, indexed from 0.
        dino_index = 0
        for dino in self.herd.dinosaurs:
            print(f"Press {dino_index} for {dino.name}")
            dino_index += 1

    def show_robo_opponent_options(self):
        # Menu of the surviving robots, indexed from 0.
        robot_index = 0
        for robot in self.fleet.robots:
            print(f"Press {robot_index} for {robot.name}")
            robot_index += 1

    def display_winners(self):
        # Bug fix: the original wrote len(list == 0), which compares the
        # list to 0 (False) and then calls len() on a bool -> TypeError.
        if len(self.herd.dinosaurs) == 0:
            print("The Dinosaurs went EXTINCT! Robots WIN!")
        if len(self.fleet.robots) == 0:
            print("The Robots are DEAD! Dinosaurs WIN!")

    def run_game(self):
        """Entry point: greet, confirm, then run the battle loop.

        NOTE(review): the robotN_battle / dinoN_battle attributes are
        never read and crash when a side has fewer than three members;
        kept for compatibility with any external code that may inspect
        them -- confirm they can be deleted.
        """
        self.display_welcome()
        self.game_start()
        self.robot0_battle = self.fleet.robots[0]
        self.robot1_battle = self.fleet.robots[1]
        self.robot2_battle = self.fleet.robots[2]
        self.dino0_battle = self.herd.dinosaurs[0]
        self.dino1_battle = self.herd.dinosaurs[1]
        self.dino2_battle = self.herd.dinosaurs[2]
        self.battle()
"mylesnlister@Myless-MacBook-Pro.local"
] | mylesnlister@Myless-MacBook-Pro.local |
ac6c3bdcd556fa864f83317da9c8ce709377b0a7 | 033489cc8c1c32c9d8cae695f2fe79643dbe852a | /scripts/count.py | 9b77bc65e69888058aef1c0bd9719e336218b82d | [
"BSD-3-Clause"
] | permissive | ueshou/mypkg | 46b6636c8caba78b6e9ccbf5162cf4f563ff1987 | 69d5f3b6c2949f9cf3b7f4914c8a5132aaaff66b | refs/heads/main | 2023-02-17T22:56:49.338683 | 2021-01-22T05:48:57 | 2021-01-22T05:48:57 | 327,495,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | #!/usr/bin/env python3
import rospy
from std_msgs.msg import Int32
rospy.init_node('count')
pub = rospy.Publisher('count_up' , Int32, queue_size=1)
rate = rospy.Rate(10)
n = 0
while not rospy.is_shutdown():
n += 1
pub.publish(n)
rate.sleep()
| [
"s19c1015uq@s.chibakouda.jp"
] | s19c1015uq@s.chibakouda.jp |
bcec11864e5a79afc5b9dcfbadbcba43c0dff5e0 | 08c48f2627281810fe2a4a37bb1e9bc5c03eeb68 | /Huan_link_all_script/All_result_ICGC/network/random_walk_restart/Walker/scripts/transform_matrix.py | d88430ba3123bda3df7da3bda747ef7d17166e4b | [] | no_license | Lhhuan/drug_repurposing | 48e7ee9a10ef6735ffcdda88b0f2d73d54f3b36c | 4dd42b35e47976cf1e82ba308b8c89fe78f2699f | refs/heads/master | 2020-04-08T11:00:30.392445 | 2019-08-07T08:58:25 | 2019-08-07T08:58:25 | 159,290,095 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | import sys
import numpy as np
def main(argv):
    """Read a matrix, take elementwise reciprocals, and write the
    symmetrised result R + R.T.

    argv[1] is the input whitespace-delimited matrix file; argv[2] is the
    output path.  The transform assumes a square matrix, as the original
    indexing did.

    Rewritten with vectorised numpy operations: this removes the
    Python-2-only `xrange` call (a NameError under Python 3), the O(n^2)
    pure-Python loops, and the `f` name shadowing, while producing
    numerically identical output.
    """
    matrix_file = argv[1]
    output_filename = argv[2]
    matrix = np.loadtxt(matrix_file)
    reciprocal = 1.0 / matrix.astype(float)
    transformed_matrix = reciprocal + reciprocal.T
    np.savetxt(output_filename, transformed_matrix, fmt='%.10f')
if __name__ == '__main__':
main(sys.argv)
| [
"lhhuan01@163.com"
] | lhhuan01@163.com |
55d8310f311948081576b53b7f9881db250ab417 | 36121f94d6ffcc23e37e81920885fea7b8613bd4 | /ColourUse/colourMap_crameri/ScientificColourMaps7/bamO/bamO.py | 523c6446441659ebd362b742b541fbd3dca58a1f | [] | no_license | mickaellalande/MC-Toolkit | 16079a58930293baa86a2be4e6c3e9755a74f861 | c0c17c107881751b82c7d99c5c7f328ddbe1fada | refs/heads/master | 2023-07-21T19:53:01.905408 | 2023-07-06T09:20:27 | 2023-07-06T09:20:27 | 233,410,384 | 11 | 6 | null | 2021-05-11T10:07:30 | 2020-01-12T15:04:06 | Jupyter Notebook | UTF-8 | Python | false | false | 12,400 | py | #
# bamO
# www.fabiocrameri.ch/colourmaps
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.30946, 0.18635, 0.26374],
[0.31419, 0.18609, 0.2688],
[0.31943, 0.18615, 0.27428],
[0.32515, 0.18655, 0.2802],
[0.33135, 0.18735, 0.28652],
[0.33801, 0.18851, 0.29323],
[0.34509, 0.19002, 0.30029],
[0.35254, 0.19193, 0.30766],
[0.36032, 0.19424, 0.3153],
[0.36841, 0.19687, 0.32316],
[0.37673, 0.19981, 0.33118],
[0.38522, 0.20312, 0.33937],
[0.39388, 0.20673, 0.34762],
[0.40263, 0.21057, 0.35593],
[0.41144, 0.21468, 0.36426],
[0.42029, 0.21903, 0.37259],
[0.42913, 0.22357, 0.3809],
[0.43796, 0.22831, 0.38916],
[0.44674, 0.23316, 0.39735],
[0.45546, 0.2382, 0.40548],
[0.46412, 0.24335, 0.41353],
[0.47268, 0.24864, 0.42148],
[0.48116, 0.25399, 0.42936],
[0.48954, 0.25945, 0.43714],
[0.49784, 0.26495, 0.44482],
[0.506, 0.27056, 0.45241],
[0.51409, 0.2762, 0.45989],
[0.52204, 0.28188, 0.46728],
[0.52991, 0.2876, 0.47457],
[0.53765, 0.29337, 0.48176],
[0.54528, 0.29918, 0.48885],
[0.55279, 0.30499, 0.49584],
[0.5602, 0.31084, 0.50275],
[0.56749, 0.31669, 0.50956],
[0.57466, 0.32257, 0.51626],
[0.58173, 0.32844, 0.52289],
[0.58869, 0.33433, 0.52942],
[0.59554, 0.34023, 0.53586],
[0.60229, 0.34612, 0.54222],
[0.60895, 0.35204, 0.5485],
[0.6155, 0.35798, 0.5547],
[0.62197, 0.36392, 0.56083],
[0.62837, 0.36989, 0.56692],
[0.63469, 0.3759, 0.57295],
[0.64095, 0.38195, 0.57894],
[0.64717, 0.38805, 0.5849],
[0.65334, 0.39422, 0.59083],
[0.65949, 0.40047, 0.59676],
[0.66561, 0.40678, 0.60268],
[0.6717, 0.41319, 0.60861],
[0.67779, 0.41968, 0.61452],
[0.68385, 0.42627, 0.62046],
[0.68991, 0.43298, 0.6264],
[0.69595, 0.43978, 0.63234],
[0.70196, 0.44669, 0.6383],
[0.70796, 0.45371, 0.64425],
[0.71393, 0.46083, 0.65021],
[0.71988, 0.46807, 0.65616],
[0.72579, 0.47542, 0.66211],
[0.73165, 0.48285, 0.66804],
[0.73747, 0.4904, 0.67395],
[0.74325, 0.49806, 0.67983],
[0.74896, 0.50579, 0.6857],
[0.75461, 0.51364, 0.69151],
[0.76017, 0.52157, 0.69729],
[0.76567, 0.52959, 0.70301],
[0.77107, 0.53769, 0.70868],
[0.77637, 0.54587, 0.71426],
[0.78156, 0.5541, 0.71977],
[0.78664, 0.5624, 0.72519],
[0.79159, 0.57073, 0.73049],
[0.7964, 0.57911, 0.73569],
[0.80107, 0.5875, 0.74075],
[0.80558, 0.59592, 0.74568],
[0.80993, 0.60432, 0.75046],
[0.8141, 0.61269, 0.75507],
[0.81809, 0.62103, 0.75949],
[0.82189, 0.6293, 0.76373],
[0.82548, 0.6375, 0.76776],
[0.82887, 0.64559, 0.77159],
[0.83204, 0.65356, 0.77518],
[0.83499, 0.66138, 0.77853],
[0.83772, 0.66905, 0.78165],
[0.84023, 0.67653, 0.78452],
[0.84252, 0.68381, 0.78713],
[0.84458, 0.69088, 0.78948],
[0.84641, 0.69773, 0.79158],
[0.84804, 0.70434, 0.79344],
[0.84947, 0.71071, 0.79503],
[0.85068, 0.71682, 0.79639],
[0.8517, 0.72267, 0.7975],
[0.85255, 0.72828, 0.79839],
[0.8532, 0.73363, 0.79906],
[0.85369, 0.73873, 0.79951],
[0.85402, 0.74357, 0.79976],
[0.8542, 0.74817, 0.79982],
[0.85425, 0.75252, 0.79969],
[0.85415, 0.75664, 0.79939],
[0.85394, 0.76053, 0.79892],
[0.85361, 0.7642, 0.79831],
[0.85318, 0.76766, 0.79755],
[0.85265, 0.77091, 0.79666],
[0.85202, 0.77396, 0.79565],
[0.8513, 0.77682, 0.79453],
[0.85052, 0.77949, 0.7933],
[0.84966, 0.78199, 0.79197],
[0.84873, 0.78432, 0.79056],
[0.84774, 0.78649, 0.78907],
[0.84669, 0.7885, 0.78749],
[0.84559, 0.79037, 0.78585],
[0.84444, 0.7921, 0.78414],
[0.84323, 0.7937, 0.78235],
[0.84198, 0.79517, 0.7805],
[0.84068, 0.79652, 0.77858],
[0.83931, 0.79777, 0.77659],
[0.83789, 0.79891, 0.77451],
[0.83639, 0.79995, 0.77234],
[0.83483, 0.80089, 0.77007],
[0.83318, 0.80174, 0.76769],
[0.83143, 0.8025, 0.76519],
[0.82958, 0.80316, 0.76254],
[0.8276, 0.80372, 0.75972],
[0.82549, 0.80417, 0.75674],
[0.82324, 0.80452, 0.75355],
[0.82081, 0.80473, 0.75014],
[0.8182, 0.80482, 0.7465],
[0.8154, 0.80475, 0.74259],
[0.81237, 0.80452, 0.7384],
[0.80912, 0.8041, 0.73391],
[0.8056, 0.80348, 0.7291],
[0.80181, 0.80264, 0.72394],
[0.79772, 0.80155, 0.71842],
[0.79333, 0.80019, 0.71252],
[0.78861, 0.79852, 0.70623],
[0.78354, 0.79653, 0.69951],
[0.77812, 0.79421, 0.69239],
[0.77232, 0.79152, 0.68483],
[0.76617, 0.78844, 0.67687],
[0.75964, 0.78497, 0.66849],
[0.75277, 0.78109, 0.65973],
[0.74556, 0.77681, 0.65062],
[0.73803, 0.77211, 0.64118],
[0.73022, 0.76703, 0.63148],
[0.72215, 0.76158, 0.62155],
[0.71388, 0.75578, 0.61146],
[0.70545, 0.74966, 0.60126],
[0.69689, 0.74325, 0.591],
[0.68825, 0.73658, 0.58073],
[0.67956, 0.7297, 0.57051],
[0.67088, 0.72262, 0.56039],
[0.66223, 0.71541, 0.55039],
[0.65362, 0.70805, 0.54055],
[0.6451, 0.70061, 0.53089],
[0.63668, 0.6931, 0.52144],
[0.62837, 0.68555, 0.51221],
[0.62019, 0.67796, 0.5032],
[0.61213, 0.67037, 0.49445],
[0.60424, 0.66277, 0.48591],
[0.59647, 0.65519, 0.47762],
[0.58885, 0.64764, 0.46958],
[0.58139, 0.64013, 0.46176],
[0.57407, 0.63266, 0.45418],
[0.56691, 0.62524, 0.44682],
[0.55989, 0.61787, 0.43969],
[0.55301, 0.61056, 0.43278],
[0.54628, 0.60331, 0.42607],
[0.53968, 0.59613, 0.41958],
[0.53323, 0.58901, 0.41328],
[0.52691, 0.58197, 0.40717],
[0.52072, 0.57499, 0.40126],
[0.51466, 0.56809, 0.39552],
[0.50873, 0.56127, 0.38995],
[0.50291, 0.55452, 0.38457],
[0.49723, 0.54784, 0.37934],
[0.49166, 0.54122, 0.37429],
[0.48619, 0.5347, 0.36937],
[0.48083, 0.52824, 0.3646],
[0.4756, 0.52184, 0.35997],
[0.47045, 0.51551, 0.35549],
[0.46539, 0.50926, 0.35112],
[0.46042, 0.50304, 0.34688],
[0.45555, 0.49689, 0.34274],
[0.45075, 0.49079, 0.33871],
[0.446, 0.4847, 0.33479],
[0.44132, 0.47866, 0.33092],
[0.4367, 0.47263, 0.32716],
[0.43213, 0.46662, 0.32347],
[0.42759, 0.46061, 0.31986],
[0.42308, 0.45463, 0.31631],
[0.41862, 0.44863, 0.31281],
[0.41419, 0.44264, 0.30939],
[0.40981, 0.43664, 0.30602],
[0.40545, 0.43065, 0.30268],
[0.40113, 0.42467, 0.29945],
[0.39683, 0.41869, 0.29625],
[0.39259, 0.41275, 0.2931],
[0.3884, 0.4068, 0.29003],
[0.38424, 0.40089, 0.28701],
[0.38015, 0.39499, 0.28407],
[0.37611, 0.38915, 0.28119],
[0.37212, 0.38334, 0.27839],
[0.3682, 0.37757, 0.27566],
[0.36434, 0.37185, 0.27297],
[0.36055, 0.36621, 0.27038],
[0.35687, 0.3606, 0.26783],
[0.35323, 0.35511, 0.26539],
[0.34969, 0.34967, 0.26303],
[0.34622, 0.3443, 0.26073],
[0.34287, 0.33906, 0.25851],
[0.33959, 0.33389, 0.25637],
[0.33639, 0.32882, 0.25434],
[0.33333, 0.32385, 0.25237],
[0.33034, 0.319, 0.25047],
[0.32745, 0.31425, 0.2487],
[0.32467, 0.30964, 0.24696],
[0.32201, 0.30512, 0.24532],
[0.31944, 0.30072, 0.24377],
[0.31698, 0.29645, 0.24231],
[0.3146, 0.29228, 0.24091],
[0.31233, 0.28823, 0.23961],
[0.31019, 0.28428, 0.23837],
[0.3081, 0.28043, 0.23723],
[0.30614, 0.27671, 0.23613],
[0.30423, 0.27305, 0.23512],
[0.30241, 0.26949, 0.23415],
[0.30069, 0.266, 0.23325],
[0.29906, 0.26257, 0.23245],
[0.29748, 0.25922, 0.2317],
[0.29598, 0.25589, 0.231],
[0.29455, 0.25265, 0.23036],
[0.29321, 0.24943, 0.22979],
[0.29194, 0.24625, 0.22931],
[0.29073, 0.2431, 0.2289],
[0.28961, 0.23999, 0.22856],
[0.28856, 0.23693, 0.22829],
[0.28759, 0.23383, 0.22809],
[0.28672, 0.2308, 0.22799],
[0.28595, 0.2278, 0.22799],
[0.28529, 0.22478, 0.22809],
[0.28474, 0.22184, 0.2283],
[0.28432, 0.21894, 0.22864],
[0.28404, 0.21608, 0.22909],
[0.28391, 0.21323, 0.22969],
[0.28395, 0.21048, 0.23047],
[0.28417, 0.20779, 0.23145],
[0.28459, 0.20518, 0.23258],
[0.28523, 0.20265, 0.23393],
[0.28613, 0.20023, 0.23553],
[0.28729, 0.19795, 0.23737],
[0.28876, 0.19581, 0.23946],
[0.29053, 0.19383, 0.24186],
[0.29265, 0.19199, 0.24455],
[0.29515, 0.19037, 0.24764],
[0.29805, 0.18898, 0.25106],
[0.30139, 0.18785, 0.25488],
[0.30519, 0.18694, 0.2591]]
# Build the matplotlib colormap object from the 256-entry RGB table above.
bamO_map = LinearSegmentedColormap.from_list('bamO', cm_data)
# For use of "viscm view"
test_cm = bamO_map
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import numpy as np
    try:
        # Preferred path: analyse the colormap with the viscm tool.
        from viscm import viscm
        viscm(bamO_map)
    except ImportError:
        # Fallback: render a simple 1x256 horizontal gradient strip.
        print("viscm not found, falling back on simple display")
        plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
                   cmap=bamO_map)
        plt.show()
| [
"jb.barre@gmail.com"
] | jb.barre@gmail.com |
06c9466548a22b41756278bd448c149df64fd1c6 | ace6e4dbf09fd26edeba28e9f91fa3e4d21a2f39 | /test_sublime.py | f043587957644abe11af487923f0d5a1667b1574 | [] | no_license | may5day/test | c8f7d01b6f928afce84e80601317f4e026348157 | 1d9eebe72d2a14a65f7328e3fbb2c0c38f054bf9 | refs/heads/master | 2020-04-26T04:02:34.437251 | 2019-03-05T09:09:22 | 2019-03-05T09:09:22 | 173,288,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | this is a test file!
this is not the first line!
hello world! | [
"2849905332@qq.com"
] | 2849905332@qq.com |
8cdbe5d4f721be8e8da2b94aa2163e7bd13f4a7b | c700111e96595d2f0699d20c3260823bf5cf5124 | /Release_01_20171106/src/ex9-integration.py | 495a8bd27c4814191b650bf18fe3816834a70e5e | [] | no_license | jimmielin/HW-Numerical-Physics-201718 | 7de1068465951b48929616623ee0b36bca65f5b5 | 63b2fd59ba1448009124feabce2bb549009b673e | refs/heads/master | 2021-09-03T21:38:53.909261 | 2018-01-12T06:59:15 | 2018-01-12T06:59:15 | 105,414,720 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | # $ RELEASE $
# $ 201711060212Z $ rel01
# $ Signed-Off-By: Haipeng Lin <jimmie.lin@gmail.com>
####################################################################
# Computational Physics, 2017-18 Sem1
# HW-1 Ex-9
#
# (c) 2017 Haipeng Lin <linhaipeng@pku.edu.cn>
# All Rights Reserved.
#
# This program is written as Homework for the Computational Physics
# Course in Peking University, School of Physics.
# NO WARRANTIES, EXPRESS OR IMPLIED, ARE OFFERED FOR THIS PRODUCT.
# Copying is strictly prohibited and its usage is solely restricted
# to uses permitted by the author and homework grading.
#
# This program is Python 3 (3.6.2 on MSYS_NT)
# compatible, and does not support Python 2.
#
# Now Playing: 野孩子 - 杨千嬅
####################################################################
# Euler's number, e.
e = 2.718281828459045
# Thin, Python-native exponential: delegates straight to the ** operator.
# (A Taylor-series implementation exists in Ex-4, per the original note.)
def exp(x):
    """Return e raised to the power *x*."""
    return e ** x
# Composite trapezoid rule for numerical integration.
def integTrapezoid(f, a, b, n):
    """Approximate the integral of *f* over [a, b] using *n* equal
    subintervals of the composite trapezoid rule.

    (The original header comment advertised an `h` step parameter, but
    the function takes the subinterval count `n`; documented correctly
    here.  A dead `result = 0` assignment was also removed.)
    """
    h = (b - a) / n
    interior = sum(f(a + i * h) for i in range(1, n))
    return h / 2 * (f(a) + f(b) + 2 * interior)
# Ex-9 Specific Code
f = lambda r: (-1)/2187 * r**4 * exp((-1) * 2 * r / 3) * (4/81*r**2 - 16/27*r + 16/9)
print(integTrapezoid(f, 0, 60, 1000)) | [
"jimmie.lin@gmail.com"
] | jimmie.lin@gmail.com |
8f02e80998e754774675b8c69805f6d4c4c0b149 | d97322b14525d7f03976ee220ad38a10419215f6 | /Basic/depth_evaluate/run.py | 854aaa0c91dc11daad9f44c85ee6718125f63a52 | [] | no_license | zhoujuncug/Basic | f4281cf4b8d6dc16f3bd40bcd65ad9ad06878f88 | 63cb6d6d535cc207fb216ac12104f373d3bddd9a | refs/heads/master | 2022-12-02T23:32:37.052574 | 2020-08-08T12:20:44 | 2020-08-08T12:20:44 | 286,036,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | import argparse
import pprint
import torch.optim as optim
import visdom
from lib.basic_tools.log import *
from lib.basic_tools.device import *
from lib.basic_tools.reproducibility import *
from lib.dataset.data4depth import *
from lib.models.depth_eval import *
from lib.utils.train import *
from lib.utils.test import *
parser = argparse.ArgumentParser(description='Basic Training')
# train
parser.add_argument('--data_augment', type=bool, default=False)
parser.add_argument('--batch_size', type=int, default=24)
parser.add_argument('--epochs', type=int, default=30)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--milestones', type=list, default=[5, 10, 15, 20, 25])
parser.add_argument('--gamma', type=float, default=0.1)
# visualization
parser.add_argument('--print_freq', type=int, default=1000)
parser.add_argument('--visdom', type=bool, default=True)
# reproducibility
parser.add_argument('--seed', type=list, default=(1, 1))
parser.add_argument('--save_model', type=bool, default=True)
args = parser.parse_args()
# logging
logger, output_dir = create_logger()
logger.info(pprint.pformat(args))
# random seed
if args.seed[0]:
set_seed(args.seed[1])
device = is_cuda()
# dataloader
train_path = '../data/data4depth_train150.npy'
test_path = '../data/data4depth_eval.npy'
train_dataset = DataLoader(
MyDataset(train_path, is_train=True, is_augment=True),
batch_size=args.batch_size,
num_workers=1,
pin_memory=True,
shuffle=True,
)
test_dataset = DataLoader(
MyDataset(test_path, is_train=True, is_augment=True),
batch_size=args.batch_size,
num_workers=1,
pin_memory=True,
shuffle=False,
)
# model
model = DepthEvaluator().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
# scheduler = MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
# viz
kwargs = {'viz': None}
if args.visdom:
viz = visdom.Visdom(env='depth eval')
kwargs.update({
'viz': viz,
})
# training
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_dataset, optimizer, epoch, **kwargs)
test(args, model, device, test_dataset, epoch, **kwargs)
scheduler.step() | [
"noreply@github.com"
] | noreply@github.com |
971930662e9f48b55e5e7268f17b00a473b909c6 | 4fb5b869f6690b73e32a2d8624f5fc8954540b42 | /pypiplot/examples.py | b73f61adfb29a23b32d768d116b50680a0502255 | [
"MIT"
] | permissive | erdogant/pypiplot | cc8eb15f9b6855cba270256591ba8b1ec4ae41f6 | 2016cca3d0b4022cda1806c2c4b8c4eb2d31ee19 | refs/heads/master | 2023-04-16T03:26:26.935072 | 2023-02-21T23:46:01 | 2023-02-21T23:46:01 | 293,334,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,271 | py | import pypiplot
# print(pypiplot.__version__)
# print(dir(Pypiplot))
from pypiplot import Pypiplot
# %% Update all libraries to date.
pp = Pypiplot(username='erdogant', repo_type=['owner', 'fork'])
pp.update()
results = pp.stats()
pp.plot_year(vmin=700)
pp.plot()
pp.plot_year()
# %% Top 10 best repos
pp = Pypiplot(username='erdogant', savepath='D://REPOS/pypiplot/repo_data/')
# Get download statistics
pp.stats()
# Get top 10
repo=pp.results['data'].sum().sort_values()[-10:].index.values
# Get stats for the top10
pp.stats(repo=repo)
# Plot
pp.plot()
#
pp.plot_year()
#
pp.plot_cal()
#
path = 'D://REPOS/erdogant.github.io/docs/imagesc/pypi/pypi_heatmap_full.html'
pp.plot_heatmap(vmin=10, vmax=2000, cmap='interpolateOranges', path=path)
# %% Plot
# Init
pp = Pypiplot(username='erdogant', savepath='D://REPOS/pypiplot/repo_data/')
# Get download statistics
results = pp.stats()
# Store svg on github.io
# path = 'D://REPOS/erdogant.github.io/docs/imagesc/pypi/pypi_heatmap.html'
# Interactive scratchpad (Spyder-style "# %%" cells) for generating pypi
# download heatmaps with pypiplot.  NOTE(review): `pp` and `plt` are assumed
# to come from cells earlier in the file (outside this excerpt) — the first
# cell below uses `pp` before any visible assignment.
path = 'D://REPOS/erdogant.github.io/docs/imagesc/pypi/pypi_heatmap.html'
path = 'C://temp/pypi_heatmap.html'
pp.plot_year(path=path, vmin=700)
# Store all repo info in github.io
pp.plot(legend=False)
# %% D3blocks
pp = Pypiplot(username='d3blocks')
pp.update(repo=['d3blocks'])
pp.stats(repo='d3blocks')
pp.plot()
# %%
pp = Pypiplot(username='erdogant')
pp.stats(repo='distfit')
pp.plot_year()
pp.plot(vmin=25)
# %% Update single repo
pp.update(repo=['bnlearn'])
pp.update(repo='bnlearn')
results = pp.stats(repo=['distfit','pca', 'bnlearn'])
pp.plot(legend=True)
# %% Get some stats
results = pp.stats(repo=['df2onehot','pca','bnlearn','ismember','thompson'])
pp.plot(legend=True)
# %%
pp = Pypiplot(username='erdogant')
pp.stats(repo='distfit')
pp.plot_year()
pp.plot(vmin=25)
pp.stats(repo='worldmap')
pp.plot_year()
pp.stats(repo='hnet')
pp.plot_year()
pp.stats(repo='ismember')
pp.plot_year()
pp.stats(repo='flameplot')
pp.plot_year()
pp.stats(repo='pca')
pp.plot_year()
pp.stats()
pp.stats(repo=['df2onehot','clustimage','bnlearn','distfit','pypickle','clusteval','findpeaks', 'kaplanmeier','pca','colourmap'])
# Smooth the daily download counts with a 30-day rolling mean before plotting.
pp.results['data'].rolling(window=30).mean().plot(figsize=(15,10))
plt.grid(True)
plt.xlabel('Time')
plt.ylabel('Average nr. download based on a rolling window of 30 days')
# pp.results['data'].cumsum().plot()
pp.plot_year(vmin=100)
pp.plot(vmin=25)
pp.results['data'].cumsum().plot()
# %% Plot bnlearn
results = pp.stats(repo='bnlearn')
pp.plot_year()
# %%
pp.update()
results = pp.stats()
pp.plot_year(vmin=700)
pp.plot(vmin=25)
# %% Plot
# Init
pp = Pypiplot(username='erdogant', savepath='D://REPOS/pypiplot/repo_data/')
# Get download statistics
results = pp.stats()
# Store svg on github.io
path = 'D://REPOS/erdogant.github.io/docs/imagesc/pypi/pypi_heatmap.html'
path = 'C://temp/pypi_heatmap.html'
pp.plot_year(path=path, vmin=700)
# Store all repo info in github.io
path = 'D://REPOS/erdogant.github.io/docs/imagesc/pypi/pypi_heatmap_repos.html'
pp.plot(path=path, vmin=100)
# %%
from pypiplot import Pypiplot
# results = pp.stats()
pp.stats(repo=['df2onehot','clustimage','bnlearn','distfit','pypickle','clusteval','findpeaks', 'kaplanmeier','colourmap'])
pp.plot_cal(method='mean', vmin=100)
pp.plot(method='mean')
# %%
| [
"erdogant@gmail.com"
] | erdogant@gmail.com |
596c977a72f3fb8a47089d9ab9c5f443a05cdcdc | 58c505e5883ce4975517eb00544c5871647b6f69 | /blog/migrations/0001_initial.py | 35a93a385d0ab7a9ce0027632d373f27e192d98d | [] | no_license | kalyanit17/my-first-blog | 0ae6e078d87f64d475bd9e077ac53b4939cb1ddf | e372db9a5741902326b4a8fd2b7591cd2e21db54 | refs/heads/master | 2020-12-31T06:46:29.128949 | 2017-02-17T06:47:38 | 2017-02-17T06:47:38 | 80,627,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-01 13:01
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated initial Django migration (header says Django 1.10.5).
    # Creates the blog `Post` model.  Do not hand-edit the schema here;
    # regenerate with `makemigrations` instead.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # NOTE(review): the default is the literal UTC timestamp captured
                # when `makemigrations` ran, not "now" at insert time — typical
                # artifact of passing timezone.now() (called) to the model field.
                ('created_date', models.DateTimeField(default=datetime.datetime(2017, 2, 1, 13, 1, 49, 908000, tzinfo=utc))),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"kalyanit17@gmail.com"
] | kalyanit17@gmail.com |
11bb47901662be6ed2b74c9e23f7bc18d66e6842 | 5d7256e74c692da0da5f5dea9d8cb5cd8e191654 | /MostPopularSuperHero.py | 62d9bedbd6911479cb0f49986a23aaecca143d4f | [] | no_license | rakeshadk7/Spark | ec2fc2ba7c16f9affa4cbc87e456eff452680bb0 | a76af73c066b78eefcaeaf60f360f897e45da3dc | refs/heads/master | 2021-05-03T23:51:27.651546 | 2016-10-27T19:35:46 | 2016-10-27T19:35:46 | 71,810,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | from pyspark import SparkConf, SparkContext
import os
# Run Spark in local (single-process) mode under the app name "PopularHero".
conf = SparkConf().setMaster("local").setAppName("PopularHero")
sc = SparkContext(conf = conf)
def countCoOccurences(line):
    """Map one Marvel-Graph line to (hero_id, number_of_co_appearances).

    Each line is whitespace-separated: the first token is a hero id, the
    remaining tokens are the ids of heroes it appeared with.
    """
    tokens = line.split()
    hero_id = int(tokens[0])
    co_appearances = len(tokens) - 1
    return (hero_id, co_appearances)
def parseNames(line):
    """Map one Marvel-Names line of the form '<id> "<name>"' to
    (hero_id, utf-8 encoded name bytes)."""
    id_part, name_part = line.split("\"")[:2]
    return (int(id_part), name_part.encode("utf8"))
# Driver: load the co-appearance graph and the id->name table, total the
# co-appearances per hero, and print the hero with the highest count.
path = os.path.abspath("C:\Users\RAdhikesavan\Documents\Personal\SparkCourse\\Marvel-Graph.txt")
occurences = sc.textFile(path)
path = os.path.abspath("C:\Users\RAdhikesavan\Documents\Personal\SparkCourse\\Marvel-Names.txt")
names = sc.textFile(path)
pairings = occurences.map(countCoOccurences)
namesRdd = names.map(parseNames)
# A hero can appear on several graph lines; sum its per-line counts.
totalFriends = pairings.reduceByKey(lambda x, y: x + y)
# Flip to (count, hero_id) so max() orders by count.
flipped = totalFriends.map(lambda xy: (xy[1], xy[0]))
mostPopular = flipped.max()
mostPopularName = namesRdd.lookup(mostPopular[1])[0]
print(str(mostPopularName) + " is the most popular superhero, with " + \
str(mostPopular[0]) + " co-appearances.")
| [
"rakesh.adk7@gmail.com"
] | rakesh.adk7@gmail.com |
acc4aef5d2a6eb365488380fe43780058d19a3d6 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1623+386/sdB_PG_1623+386_lc.py | 79bf55d3e02753efc2d185b0d3025a46f7a7b55a | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from gPhoton.gAperture import gAperture
def main():
    # One-shot gPhoton extraction: an NUV light curve for sdB PG 1623+386 in
    # 30 s bins, written to the CSV path below.  skypos is presumably
    # (RA, Dec) in degrees and radius/annulus aperture sizes in degrees —
    # confirm against the gPhoton gAperture documentation.
    gAperture(band="NUV", skypos=[246.351292,38.505214], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_1623+386 /sdB_PG_1623+386_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
6567d0f8b19425ebfd1cd990c73c0e2498f971f2 | 41294ab88364fbb40ee67fcc643a91cc355c25d5 | /solution/accounting.py | 368251986f18af4b2806c42760073666909b3c70 | [] | no_license | tessajules/underpaid-customers-HB-homework | 96e542cc736d03b1476c88c43cd931081b03926d | ec3526debea68ecbf7aed25d041baf26110e40b2 | refs/heads/master | 2021-05-28T22:11:25.106565 | 2015-04-10T02:56:36 | 2015-04-10T02:56:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | MELON_COST = 1.00
def melon_payment_calculator(payment_data):
"""Calculate cost of melons and determine who has underpaid."""
payment_data = open(payment_data)
for line in payment_data:
order = line.split('|')
customer_name = order[1]
customer_first = customer_name.split(" ")[0]
customer_melons = float(order[2])
customer_paid = float(order[3])
customer_expected = customer_melons * MELON_COST
if customer_expected < customer_paid:
print customer_name, "paid %.2f, expected %.2f" % (
customer_paid, customer_expected)
print customer_first, "has overpaid for their melons."
elif customer_expected > customer_paid:
print customer_name, "paid %.2f, expected %.2f" % (
customer_paid, customer_expected)
print customer_first, "has underpaid for their melons."
# Script entry point: audit the bundled orders file.
melon_payment_calculator("customer-orders.txt")
"info@hackbrightacademy.com"
] | info@hackbrightacademy.com |
1944a86dacbfb1bc3786a49c8b508979384b9555 | 99b496588937e7a4269d7614f9d2c9df701602b6 | /2017年04月14日_P14_recommendations.py | 993fa90f3da40c5f8ef77bc303d8504281a8e98d | [] | no_license | kedup/2017-04-13-_CollectiveIntelligence | bedb909d0f8e2404c693aab587d1a44095ef7b38 | 3e95fea24d6f022ebab3ab52d6f27a4ce2b62d62 | refs/heads/master | 2021-06-16T07:51:25.770248 | 2017-05-11T12:10:50 | 2017-05-11T12:10:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,492 | py | # A dictionary of movie critics and their ratings of a small
# set of movies
critics={'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
'The Night Listener': 4.5, 'Superman Returns': 4.0,
'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
from math import sqrt
# Pearson correlation coefficient between critics p1 and p2.
def sim_pearson(prefs,p1,p2):
    """Return the Pearson correlation of the ratings p1 and p2 share.

    Only items rated by both critics are compared.  Returns 0 when there is
    no overlap or when either critic's shared ratings have zero variance.
    """
    shared = {item: 1 for item in prefs[p1] if item in prefs[p2]}
    n = len(shared)
    if n == 0:
        return 0
    # Pull both critics' ratings for the shared items, in the same order.
    ratings1 = [prefs[p1][item] for item in shared]
    ratings2 = [prefs[p2][item] for item in shared]
    sum1 = sum(ratings1)
    sum2 = sum(ratings2)
    sum1_sq = sum(r * r for r in ratings1)
    sum2_sq = sum(r * r for r in ratings2)
    product_sum = sum(a * b for a, b in zip(ratings1, ratings2))
    # Pearson r = covariance / (product of standard deviations).
    numerator = product_sum - (sum1 * sum2 / n)
    denominator = sqrt((sum1_sq - sum1 * sum1 / n) * (sum2_sq - sum2 * sum2 / n))
    if denominator == 0:
        return 0
    return numerator / denominator


# Best matches for `person`, ranked best-first.
def topMatches(prefs,person,n=5,similarity=sim_pearson):
    """Return up to n (score, other_critic) pairs, highest score first."""
    scored = ((similarity(prefs, person, other), other)
              for other in prefs if other != person)
    return sorted(scored, reverse=True)[:n]
# Quick demo: Toby's three most similar critics, then the correlation
# between Lisa Rose and Gene Seymour.
print(topMatches(critics,'Toby',n=3))
print(sim_pearson(critics,'Lisa Rose','Gene Seymour'))
| [
"webkwd@qq.com"
] | webkwd@qq.com |
a60ce595e94bd01b6f46c0cb382957eebfd7ab07 | 576cc83449e10fd3f98281970c46016ea7a5aea2 | /Tensorflow/CNN/莫烦python02.py | 2e08d2c51048bcd31c14f4a4a131722ae38111f1 | [] | no_license | HotView/PycharmProjects | 215ab9edd341e3293daebcf86d97537f8cd28d75 | 61393fe5ba781a8c1216a5cbe7e0d06149a10190 | refs/heads/master | 2020-06-02T07:41:53.608742 | 2019-11-13T08:31:57 | 2019-11-13T08:31:57 | 191,085,178 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,519 | py | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data",one_hot=True)
def add_layer(inputs, in_size, out_size, activaion_function=None):
    """Build one fully connected layer: inputs @ W + b, optionally activated.

    Note: the keyword name `activaion_function` (sic) is kept because callers
    pass it by keyword.
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # small positive init
    pre_activation = tf.matmul(inputs, weights) + biases
    if activaion_function is None:
        return pre_activation
    return activaion_function(pre_activation)
def compute_accuracy(v_xs,v_ys):
    # Fraction of samples whose argmax prediction matches the one-hot label.
    # Relies on module globals: `sess`, `xs`, `ys` and `prediction`.
    global prediction
    y_pre = sess.run(prediction,feed_dict={xs:v_xs})
    correct_prediction = tf.equal(tf.argmax(y_pre,1),tf.argmax(v_ys,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    # NOTE(review): this runs the forward pass a second time just to
    # evaluate `accuracy` — the first sess.run result is only used to build
    # the comparison graph.
    result = sess.run(accuracy,feed_dict={xs:v_xs,ys:v_ys})
    return result
# Placeholders: 28x28 images flattened to 784 features, one-hot labels (10).
xs = tf.placeholder(tf.float32,[None,784])
ys = tf.placeholder(tf.float32,[None,10])
# Output layer: a single softmax layer (multinomial logistic regression).
prediction = add_layer(xs,784,10,activaion_function=tf.nn.softmax)
# Loss: cross-entropy between the one-hot labels and the softmax output.
crosss_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction),reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(crosss_entropy)
sess =tf.Session()
sess.run(tf.initialize_all_variables())
# Train on mini-batches of 100; report test accuracy every 50 steps.
for i in range(5000):
    batch_xs,batch_ys = mnist.train.next_batch(100)
    sess.run(train_step,feed_dict={xs:batch_xs,ys:batch_ys})
    if i%50==0:
        print(compute_accuracy(mnist.test.images,mnist.test.labels))
"864773190@qq.com"
] | 864773190@qq.com |
09d975c569575f2a776cc28ff00f7e2fc34a7637 | 7bb14c6587a73b28f8e99b8ba9d02a1aaa08fa27 | /samfilterdg.py | 84cecf9605f4a50ab58fd0557ab54268534ed4b8 | [] | no_license | zhipenglu/duplex | 95a570c89bdac9efcd03dcabcfc745d2aa46728d | 62c845cad98fda8b9cfd45856e3f469a249b216f | refs/heads/master | 2020-12-29T02:42:31.242209 | 2017-02-01T19:31:18 | 2017-02-01T19:31:18 | 52,380,558 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,627 | py | """
samfilterdg.py
Zhipeng Lu, 2015-10-16
Read in sam file with DG (duplex group) and XG (chiastic) tags,
filter out DGs with only 1 read and those with identical breaks, XG:i:2
"""
import sys, re, time
# Usage check: inputsam and outputsam paths are required (Python 2 script).
if len(sys.argv) < 3:
    print "samfilterdg.py"
    print "removes DGs with only single or duplicate reads and XG:i:2."
    print "Usage: python samfilterdg.py inputsam outputsam"
    sys.exit()
inputsam = sys.argv[1]
outputsam = sys.argv[2]
inputsamh = open(inputsam, 'r')
outputsamh = open(outputsam, 'w')
samheader = ''
numinputreads = 0
numoutputreads = 0
numoutputdg = 0
dgdict = {} #dgname: [dgreads]
# Pass 1: bucket SAM records by duplex-group name (field 21), keeping the
# header aside.  Records are dropped when the XG tag is 2, the DG name is
# too short to be valid, or the MD tag shows more than one mismatch.
for line in inputsamh: #construct a dictionary with all DGs
    if line[0] == "@":
        samheader += line
        continue
    numinputreads += 1
    record = line.strip('\n').split()
    if len(record) < 21: continue
    cigar = record[5]
    dgname = record[20]
    md = record[16].split(":")[-1]
    # Count mismatches encoded in the MD tag (digits followed by a base).
    mdnum = len(re.findall('\d+[ATCG]', md))
    if record[19] == "XG:i:2" or len(dgname) <= 5 or mdnum > 1 : continue
    #remove reads with >1 mismatches, remove wrong DGs
    if not dgname in dgdict:
        dgdict[dgname] = [line]
    else: dgdict[dgname].append(line)
    if not numinputreads%10000:
        print time.strftime("%Y-%m-%d:%H:%M:%S"), "processed", numinputreads
#remove DG where all the reads have identical breaks in the CIGAR strings
#simply compare the N substrings for now
# NOTE(review): popping from dgdict while iterating dgnamelist is safe here
# only because Python 2's dict.keys() returns a list copy.
dgnamelist = dgdict.keys()
allreads = ''
for dgname in dgnamelist:
    dgreads = dgdict[dgname]
    breaklist = []
    for read in dgreads:
        record = read.strip('\n').split()
        cigar = record[5]
        # The N segments of the CIGAR string encode the gap ("break") layout.
        cigarbits = tuple(re.findall('\d+[N]', cigar))
        breaklist.append(cigarbits)
    if len(list(set(breaklist))) == 1 :
        dgdict.pop(dgname)
        continue
    numoutputdg += 1
    numoutputreads += len(dgreads)
    dgout = ''.join(dgreads)
    allreads += dgout
outputsamh.write(samheader)
outputsamh.write(allreads)
print "\nNumber of input reads:", numinputreads
print "Number of filtered reads:", numoutputreads
print "Number of filtered duplex groups:", numoutputdg
inputsamh.close()
outputsamh.close()
"""
samheader = ''
numinputreads = 0
numoutputreads = 0
numoutputdg = 0
dgreadslist = []
outstring = ''
line = inputsamh.readline()
while line[0] == "@":
samheader += line
line = inputsamh.readline()
outputsamh.write(samheader)
record = line.strip('\n').split()
lastdgname = record[20]
dgreadslist.append(line)
for line in inputsamh:
numinputreads += 1
record = line.strip('\n').split()
cigar = record[5]
if len(record) <21: continue
dgname = record[20]
if dgname == lastdgname: dgreadslist.append(line)
else:
md = record[16].split(":")[-1]
if "A" in md or "T" in md or "C" in md or "G" in md: continue
breaklist = []
for read in dgreadslist:
record = read.strip('\n').split()
cigar = record[5]
cigarbits = tuple(re.findall('\d+[N]', cigar))
breaklist.append(cigarbits)
if len(list(set(breaklist))) > 1 :
print "Passed filter:", dgname
numoutputdg +=1
numoutputreads += len(dgreadslist)
dgout = ''.join(dgreadslist)
outstring += dgout
lastdgname = dgname
dgreadslist = [line]
if not numoutputdg%1000:
outputsamh.write(outstring)
outstring = ''
print "\nNumber of input reads:", numinputreads
print "Number of filtered reads:", numoutputreads
print "Number of filtered duplex groups:", numoutputdg
inputsamh.close()
outputsamh.close()
"""
| [
"zhipengluchina@gmail.com"
] | zhipengluchina@gmail.com |
6b5ecf59aee91287debfc7a264002ca531ac0eb0 | a657d3151ec97e2cbb184d967fd7bbc2d6167253 | /Decision-Tree-with-Adaboost/MainCode/DecisionTreeFull.py | b064f071d27d83ad1664b989b1f3e09f683c804b | [] | no_license | hasin-abrar/Machine-Learning | 98e370291f5fb073db77a2c4e3b5be0ce33fb8d2 | 72b5db6e7815aebe7e477c73d6765e34ad7a91b4 | refs/heads/master | 2022-12-23T21:02:03.049735 | 2020-10-06T14:42:46 | 2020-10-06T14:42:46 | 286,272,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,422 | py | # pre processing
import math
import random
import datetime
import numpy as np
import pandas as pd
class PreProcessing(object):
    """Data-cleaning helpers for the decision-tree homework: missing-value
    imputation, numeric binarization around an information-gain split point,
    and label encoding."""
    def __init__(self, examples):
        self.examples = examples
    # takes a list as input and gives the mode; most_common(1)[0][0] picks the
    # single most frequent value (first seen wins ties).
    def Most_Common(self, lst):
        from collections import Counter
        data = Counter(lst)
        return data.most_common(1)[0][0]
    # missing_col = Find_Missing_Col(examples,4)
    def Find_Missing_Col(self,attr_length):
        """Return indices of columns containing ' ' or NaN values.
        NOTE(review): reads the module-global `examples`, not self.examples —
        only works when called after the global is defined."""
        missing_col = []
        for i in range(attr_length):
            for e in examples:
                if e[i] == " ":
                    # print(e[i])
                    missing_col.append(i)
                elif isinstance(e[i], str):
                    continue
                    # print(type(e[i]))
                    # print(len(e[i]),e[i])
                if math.isnan(e[i]):
                    missing_col.append(i)
                    break
        return missing_col
    # number_type is a list
    # number_type = [1,2]
    # Replace_With_Mean(examples,number_type)
    def Replace_With_Mean(self, examples_df, examples, number_type):
        """Impute NaNs in the listed numeric columns with the column mean
        (in place, via sklearn's Imputer)."""
        from sklearn.preprocessing import Imputer
        imputer = Imputer(missing_values='NaN',
                          strategy='mean', axis=0)
        for n in number_type:
            imputer = imputer.fit(examples[:, n:(n + 1)])
            examples[:, n:(n + 1)] = imputer.transform(examples[:, n:(n + 1)])
    # examples = Remove_Useless_Rows(examples)
    def Remove_Useless_Rows(self, examples):
        """Drop rows whose label (last column) is NaN; returns the new array."""
        index = -1
        for e in examples:
            index += 1
            last_value = e[-1]
            # print(last_value,index)
            if isinstance(last_value, str):
                continue
            if math.isnan(last_value):
                # print(index)
                examples = np.delete(examples, index, 0)
                # print(examples)
        return examples
    def Replace_With_Mode(self, examples, string_type):
        """Impute NaNs in the listed categorical columns with the column mode
        (in place)."""
        for s in string_type:
            single_col = examples[:, s]
            _max_appearance = self.Most_Common(single_col)
            for j in range(len(single_col)):
                if isinstance(single_col[j], str):
                    continue
                if math.isnan(single_col[j]):
                    single_col[j] = _max_appearance
    def GetBooleanEntropy(self, yes, no):
        """Binary entropy (bits) of a yes/no count pair; 0 for pure sets."""
        succ_prob = (yes / (yes + no))
        if succ_prob == 0:
            return 0
        elif succ_prob == 1:
            return 0
        # print ("succ : ",succ_prob)
        return -(succ_prob * math.log2(succ_prob) + (1 - succ_prob) * math.log2((1 - succ_prob)))
    def Get_Split_Val(self, examples, index):
        """Return the midpoint split value for numeric column `index` that
        maximizes information gain over the binary class in the last column.
        NOTE(review): counts positives via the literal label "Yes" — assumes
        this runs before label encoding; confirm against callers."""
        # selected_col = examples
        sorted_col = sorted(examples, key=lambda k: k[index])
        print(sorted_col)
        start = sorted_col[0][index] - 10
        # end = sorted_col[0] + 10
        class_col = examples[:, -1]
        # yes/no hold [left-of-split, right-of-split] class counts.
        yes = []
        no = []
        yes.append(0)
        yes.append(0)
        no.append(0)
        no.append(0)
        pos = neg = 0
        # print(class_col)
        for c in class_col:
            if c == "Yes":
                yes[1] += 1
            else:
                no[1] += 1
        pos = yes[1]
        neg = no[1]
        # print(yes, no)
        init_entropy = self.GetBooleanEntropy(yes[1], no[1])
        _max = 0
        split = start
        # Sweep candidate splits between consecutive sorted values, moving one
        # example at a time from the right partition to the left.
        for j in range(len(sorted_col) - 1):
            mid = (sorted_col[j][index] + sorted_col[j + 1][index]) / 2
            remainder_attrb_entropy = 0
            if sorted_col[j][-1] == "Yes":
                yes[0] += 1
                yes[1] -= 1
            else:
                no[0] += 1
                no[1] -= 1
            for k in range(2):
                remainder_attrb_entropy += ((yes[k] + no[k]) / (pos + neg)) * self.GetBooleanEntropy(yes[k], no[k])
            gain = init_entropy - remainder_attrb_entropy
            if gain > _max:
                _max = gain
                split = mid
        # print(split)
        return split
    def Binarization(self, examples, num_type):
        """Replace each numeric column with -1 (<= split) / +1 (> split),
        in place, using the information-gain split point."""
        for n in num_type:
            split_val = self.Get_Split_Val(examples, n)
            # print("##########")
            # print("attribute ", n, " : ", split_val)
            changed_col = examples[:, n]
            for i in range(len(changed_col)):
                if changed_col[i] <= split_val:
                    changed_col[i] = -1 # making all values having same type
                else:
                    changed_col[i] = +1
    def GetAttributeList(self, dataframe):
        """Return [0, 1, ..., n_attributes-1] (label column excluded)."""
        attr_list = []
        for i in range(len(list(dataframe)) - 1):
            attr_list.append(i)
        return attr_list
    def GetAtrributeLength(self, dataframe):
        # Number of feature columns (label excluded).
        return len(dataframe.columns) - 1
    def GetAtrributeMapping(self,examples,attr_length):
        """Return ({attr_id: list of distinct values}, {attr_id: column index})."""
        attr_mapping = {}
        index = {}
        for i in range(attr_length):
            single_col = examples[:,i]
            attr_types = list( set(single_col) )
            attr_name = i
            attr_mapping[attr_name] = attr_types
            index[attr_name] = i
        return attr_mapping, index
    def DoLastColEncoding(self,examples,last_col,choice):
        """Encode the label column in place: last_col[0] -> 1, else -> -1.
        For choice 2 (adult dataset) a trailing '.' is stripped first."""
        y = examples[:,-1]
        for i in range(len(examples)):
            y[i] = y[i].strip()
            if choice == 2:
                y[i] = y[i].strip('.')
            if y[i] == last_col[0]:
                y[i] = 1
            else:
                y[i] = -1
        return y
    def GetTrainTestSplit(self,x,y,split_size):
        # Deterministic split (fixed random_state) so runs are comparable.
        # from sklearn.cross_validation import train_test_split
        from sklearn.model_selection import train_test_split
        return train_test_split(x, y,test_size=split_size,random_state=60)
class Node:
    """Decision-tree node.

    `val` is an attribute id for internal nodes or a class label for leaves,
    `isLeaf` is the 1/0 leaf flag, and `child` holds sub-nodes in the same
    order as the attribute's value list.
    """
    def __init__(self, val, isLeaf):
        self.val = val
        self.isLeaf = isLeaf
        self.child = []

    def insert(self, subtree):
        """Append `subtree` (itself a Node) as the next child."""
        self.child.append(subtree)
class DecisionTree:
    """ID3-style decision-tree learner over categorical/binarized attributes,
    with an Adaboost variant built from depth-limited trees (stumps when
    depth_max == 1).  Labels are +1 / -1."""
    def __init__(self,attr_mapping,index,depth_max):
        # attr_mapping: {attr_id: list of possible values};
        # index: {attr_id: column index}; depth_max: growth limit.
        self.attr_mapping = attr_mapping
        self.index = index
        self.depth_max = depth_max
    def setMaxDepth(self,depth):
        self.depth_max = depth
    def GetBooleanEntropy(self,yes,no):
        """Binary entropy (bits); 0 for empty or pure count pairs."""
        if (yes + no) == 0:
            # print("WHAT")
            return 0
        succ_prob = (yes / (yes + no))
        if succ_prob == 0:
            return 0
        elif succ_prob == 1:
            return 0
        else:
            # print ("succ : ",succ_prob)
            return -(succ_prob * math.log2(succ_prob) + (1 - succ_prob) * math.log2((1 - succ_prob)))
    # attribute is an attribute id, index is its column index
    def Importance(self,attribute,x_train,y_train, index):
        """Information gain of splitting x_train/y_train on `attribute`."""
        yes = no = 0
        remainder_attrb_entropy = 0
        for y in y_train:
            if y == 1: #this means class "Yes"
                yes+=1
            else:
                no+=1
        attr_entropy = self.GetBooleanEntropy(yes,no)
        # all the attribute values of that attribute = list
        # attr_vals is a list
        attr_vals = self.attr_mapping[attribute]
        # pos/neg count class labels per attribute value.
        pos = []
        neg = []
        for j in range(len(attr_vals)):
            pos.append(0)
            neg.append(0)
        for i in range(len(x_train)):
            for j in range(len(attr_vals)):
                # example has the same attribute value
                if x_train[i][index] == attr_vals[j] :
                    if y_train[i] == 1:
                        pos[j] += 1
                    else:
                        neg[j] += 1
                    break
        for k in range(len(attr_vals)):
            weight = ((pos[k] + neg[k])/(yes+no) )
            # print(weight)
            remainder_attrb_entropy += weight* self.GetBooleanEntropy(pos[k],neg[k] )
        return attr_entropy - remainder_attrb_entropy
    # attributes is a list of attribute ids still available for splitting
    def Dec_Tree_Learning(self,x_train,y_train,attributes,par_x_train,par_y_train,depth):
        """Recursively grow a tree (classic DECISION-TREE-LEARNING from AIMA):
        stop on depth limit, empty data (parent plurality), pure class, or
        exhausted attributes; otherwise split on the max-gain attribute."""
        same_class = 1
        yes = no = 0
        if depth >= self.depth_max:
            return self.Plurality_Value(y_train)
        for y in y_train:
            if y == 1:
                yes += 1
            else:
                no += 1
            if yes >0 and no >0:
                same_class = 0
                break
        if len(x_train) == 0:
            return self.Plurality_Value(par_y_train)
        elif same_class == 1:
            if yes > 0 :
                return Node(1,1)
            else :
                return Node(-1,1)
        elif len(attributes) == 0:
            return self.Plurality_Value(y_train)
        else:
            _max = -1
            root = attributes[0]
            for a in attributes: # 'a' is an int
                importance = self.Importance(a,x_train,y_train,self.index[a])
                if importance > _max:
                    _max = importance
                    root = a
            tree = Node(root,0)
            attribute_list = self.attr_mapping[root]
            # One child per possible value of the chosen attribute, in
            # attr_mapping order (Prediction relies on this ordering).
            for a in attribute_list: # each a is a attribute value
                child_x_train = []
                child_y_train = []
                for i in range(len(x_train)):
                    # attribute index and its corresponding value in example e[index[root]]
                    if x_train[i][self.index[root]] == a:
                        child_x_train.append(x_train[i])
                        child_y_train.append(y_train[i])
                # NOTE(review): this inner loop reuses the name `a`, shadowing
                # the attribute value above; harmless since `a` is not read
                # again afterwards in this iteration.
                new_attributes = []
                for a in attributes:
                    if a == root:
                        continue
                    new_attributes.append(a)
                subtree = self.Dec_Tree_Learning(child_x_train,child_y_train,new_attributes,x_train,y_train,depth+1)
                tree.insert(subtree)
            return tree
    def Plurality_Value(self,y_val):
        """Leaf Node holding the majority label of y_val (-1 wins ties)."""
        yes = no = 0
        for y in y_val:
            if y == 1:
                yes+=1
            else:
                no+=1
        if yes > no:
            return Node(1,1) # 1st 1 = Yes
        else:
            return Node(-1,1) # 1st 0 = No
    def Prediction(self,x_test, node):
        """Walk the tree for a single example and return its +1/-1 label."""
        if node.isLeaf == 1 :
            return node.val
        attr = node.val
        attr_list = self.attr_mapping[attr]
        indx = self.index[attr]
        found = False
        next_node = Node(0,0)
        for i in range(len(attr_list)):
            if x_test[indx] == attr_list[i]:
                found = True
                next_node = node.child[i]
                break
        if found != True :
            # NOTE(review): fallback passes the feature vector x_test to
            # Plurality_Value, which expects labels — this only counts how
            # many features equal 1; looks like a bug to confirm.
            print(indx," Default in Searching !",x_test)
            defaultNode = self.Plurality_Value(x_test)
            return defaultNode.val
        else:
            return self.Prediction(x_test,next_node)
    def Adaboost(self,x_train,y_train, k_count, attributes):
        """Train k_count boosted learners by weighted resampling; returns a
        Weighted_Majority of the hypotheses and their log-odds vote weights."""
        h = []
        z = []
        weight = []
        x_train_index = []
        y_train_index = []
        for i in range(len(x_train)):
            weight.append((1 / len(x_train)))
            x_train_index.append(i)
            y_train_index.append(i)
        # print(weight)
        for k in range(k_count):
            z.append(0.0)
            node = Node(0,0)
            h.append(node)
            next_x_train = []
            next_y_train = []
            # Resample the training set proportionally to current weights.
            # data = examples_dataframe.sample(len(examples_dataframe), weights=weight)
            data = np.random.choice(x_train_index, len(x_train_index), p=weight)
            for ind in data:
                next_x_train.append(x_train[ind])
                next_y_train.append(y_train[ind])
            h[k] = self.Dec_Tree_Learning(next_x_train,next_y_train, attributes,[], [], 0)
            error = 0
            for j in range(len(x_train)):
                if self.Prediction( x_train[j],h[k]) != y_train[j]:
                    error += weight[j]
            if error > 0.5:
                # NOTE(review): `k -= 1` has no effect on a Python for-loop
                # counter — this round is simply skipped rather than retried.
                k -= 1
                print("K KOMSEEEE")
                continue
            # print(k," Error : ",error)
            # Down-weight correctly classified examples, then renormalize.
            for j in range(len(x_train)):
                if self.Prediction( x_train[j],h[k]) == y_train[j]:
                    weight[j] *= (error / (1 - error))
            # weight = preprocessing.normalize(weight)
            weight = [ float(i) / sum(weight) for i in weight ]
            z[k] = math.log10((1 - error) / error)
        return Weighted_Majority(h,z)
    def Prediction_Stump(self,weighted_majority, k_count, x_test):
        """Weighted vote of the first k_count boosted hypotheses; +1/-1."""
        val = 0
        h = weighted_majority.h
        z = weighted_majority.z
        for i in range(k_count):
            pred = self.Prediction( x_test,h[i])
            # print (pred, z[i])
            val += ( pred* z[i] )
        # print("final : ",val)
        if val > 0:
            return 1
        else:
            return -1
class Weighted_Majority:
    """Container pairing Adaboost hypotheses `h` with their vote weights `z`."""

    def __init__(self, h, z):
        self.h = h
        self.z = z
def PreProcessData(dataset_frame,number_type,replace_with_mean,dropping_col,last_col,choice):
    """Full preprocessing pipeline for one dataset: normalize missing-value
    markers, drop columns, (for choice 0) rebalance the sample, impute,
    binarize numeric columns, and encode labels.

    Returns (x, y, attr_mapping, index, attr_list, pre_processing).
    """
    # ' ' and '?' are the missing-value markers used by these CSVs.
    dataset_frame = dataset_frame.replace(' ', np.NaN)
    dataset_frame = dataset_frame.replace('?', np.NaN)
    dataset_frame.drop(dataset_frame.columns[dropping_col], axis=1, inplace=True)
    examples = dataset_frame.iloc[:, :].values
    examples_df = dataset_frame.iloc[:, :]
    # x = dataset_frame.iloc[:, :-1].values
    # y = dataset_frame.iloc[:, -1].values
    if choice == 0:
        # Rebalance: keep every positive row plus 7 random other rows.
        extra_sample_count = 7
        examples_filtered = []
        examples_filtered_index = []
        for i in range(len(examples)):
            if examples[i][-1] == 1:
                examples_filtered.append(list(examples[i]) )
                examples_filtered_index.append(i)
        indx_count = 0
        while(True):
            rand_index = random.randint(0,len(examples) - 1)
            if rand_index not in examples_filtered_index:
                examples_filtered.append(list(examples[rand_index]) )
                indx_count += 1
                if indx_count == extra_sample_count:
                    break
        random.shuffle(examples_filtered)
        print(examples)
        examples_filtered = np.array(examples_filtered)
        print("########### FILTERED ##############")
        print(examples_filtered)
        examples = examples_filtered
    pre_processing = PreProcessing(examples)
    attr_list = pre_processing.GetAttributeList(examples_df)
    # attr_length does not include label (last col)
    attr_length = pre_processing.GetAtrributeLength(dataset_frame)
    # Every non-numeric column gets mode imputation.
    replace_with_mode = []
    for i in range(attr_length):
        if i in number_type:
            continue
        replace_with_mode.append(i)
    examples = pre_processing.Remove_Useless_Rows(examples)
    pre_processing.Replace_With_Mean(examples_df, examples, replace_with_mean)
    pre_processing.Replace_With_Mode(examples, replace_with_mode)
    for n in number_type:
        examples[:,n:(n+1)] = examples[:,n:(n+1)].astype(np.float64)
    pre_processing.Binarization(examples, number_type)
    attr_mapping, index = pre_processing.GetAtrributeMapping(examples, attr_length)
    y = pre_processing.DoLastColEncoding(examples, last_col,choice)
    x = examples[:, :-1]
    return x, y, attr_mapping, index, attr_list,pre_processing
##################### START #######################
# Per-dataset configuration: `choice` selects which CSV to load and which
# columns are numeric (binarized), mean-imputed, or dropped.  `last_col`
# lists the label values with the positive class first.
choice = 0
dropping_col = []
number_type = []
dataset_test = []
if choice == 0:
    dataset = pd.read_csv("Data.csv")
    last_col = ["1", "0"]
    dropping_col = []
    replace_with_mean = number_type = [1, 2]
elif choice == 1:
    dataset = pd.read_csv("1/1.csv")
    last_col = ["Yes", "No"]
    dropping_col = [0]
    replace_with_mean = number_type = [4, 17, 18]
elif choice == 2:
    # Adult income dataset: comes with its own test split.
    dataset = pd.read_csv("2/2.csv",header = None)
    dataset_test = pd.read_csv("2/2_test.csv",header = None)
    last_col = ["<=50K", ">50K"]
    dropping_col = []
    number_type = [0, 2, 4, 10, 11, 12]
    replace_with_mean = [0,2,4,12]
elif choice == 3:
    dataset = pd.read_csv("3/3.csv")
    last_col = ["1", "0"]
    dropping_col = []
    attrb_length = len(dataset.columns) - 1
    for i in range(attrb_length):
        number_type.append(i)
    # number_type = [0, 2, 4, 10, 11, 12]
    print(number_type,len(number_type))
    replace_with_mean = number_type
else:
    dataset = pd.read_csv("Data.csv")
    last_col = ["Yes", "No"]
    dropping_col = []
    replace_with_mean = number_type = [1, 2]
x, y, attr_mapping, index, attr_list,pre_processing = PreProcessData(dataset,number_type, replace_with_mean,dropping_col,last_col,choice)
'''
dataset = dataset.replace(' ',np.NaN)
dataset = dataset.replace('?',np.NaN)
dataset.drop(dataset.columns[dropping_col], axis=1, inplace=True)
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
examples = dataset.iloc[:, :].values
examples_df = dataset.iloc[:, :]
pre_processing = PreProcessing(examples)
attr_list = pre_processing.GetAttributeList(examples_df)
# attr_length does not include label (last col)
attr_length = pre_processing.GetAtrributeLength(dataset)
for i in range(attr_length):
if i in number_type:
continue
string_type.append(i)
# missing_col = pre_processing.Find_Missing_Col(attr_length)
# print(missing_col)
examples = pre_processing.Remove_Useless_Rows(examples)
pre_processing.Replace_With_Mean(examples_df,examples,number_type)
pre_processing.Replace_With_Mode(examples,string_type)
pre_processing.Binarization(examples,number_type)
attr_mapping, index = pre_processing.GetAtrributeMapping(examples,attr_length)
y = pre_processing.DoLastColEncoding(examples,last_col)
x = examples[:,:-1]
'''
# Split: the adult dataset (choice 2) ships a separate test file; all other
# datasets use a deterministic 80/20 split.
if choice == 2:
    x_train = x
    y_train = y
    x_test, y_test,*rest = PreProcessData(dataset_test, number_type, replace_with_mean, dropping_col, last_col,choice)
else:
    x_train, x_test, y_train, y_test = pre_processing.GetTrainTestSplit(x,y,split_size=0.2)
# print(x_train,"\n",y_train,"\n", x_test ,"\n", y_test)
# print(examples)
############### Decision Tree Learning ############
# Unlimited-depth tree for the baseline, depth-1 stumps for Adaboost.
decision_tree = DecisionTree(attr_mapping,index, math.inf)
decision_tree_adaboost = DecisionTree(attr_mapping,index,1)
print(len(x_train), len(x_test))
tree = decision_tree.Dec_Tree_Learning(x_train,y_train,attr_list,[],[],0)
not_match = match = 0
# print(tree)
for i in range(len(x_test)):
    # y_test[i] = y_test[i].strip()
    # if choice == 2:
    #     y_test[i] = y_test[i].strip('.')
    if decision_tree.Prediction(x_test[i],tree) == y_test[i]:
        match += 1
        # print("Match")
    else:
        not_match += 1
        # print("Does not match")
print(match,not_match)
accuracy = (match)/ (match + not_match) * 100
print("Decision Tree : ",accuracy,"%","Time : ",datetime.datetime.now().time())
print("*******Adaboost*******")
# Evaluate Adaboost at several ensemble sizes.
k_list = [5,10,15,20]
for k in k_list:
    k_count = k
    weighted_majority = decision_tree_adaboost.Adaboost(x_train,y_train,k_count,attr_list)
    not_match = match = 0
    for i in range(len(x_test)):
        if decision_tree_adaboost.Prediction_Stump(weighted_majority,k_count,x_test[i]) == y_test[i]:
            match += 1
            # print("Match")
        else:
            not_match += 1
            # print("Does not match")
    accuracy = (match)/ (match + not_match) * 100
    print("LoopCount : ",k," accuracy : ",accuracy,"%","Time : ",datetime.datetime.now().time())
# '''
"1405048.mha@ugrad.cse.buet.ac.bd"
] | 1405048.mha@ugrad.cse.buet.ac.bd |
67297bf9c3b97968aa6fd2d91e6b6bc95004c561 | 80eff326f1d78c4dce8d15aa927b07e0b65af798 | /third_party/mmpose_model/transforms.py | f03bd453db0410603facd9c6360d3708c83d5a40 | [] | no_license | animalact/AAR_Net | 4e228651a8a693cb8b91ae1c99f2af56f00dce8a | 9f88a6678d3d6bfbba2dc654c1e0dcac43e6d6b7 | refs/heads/master | 2023-07-25T20:37:08.182423 | 2021-09-08T17:30:33 | 2021-09-08T17:30:33 | 384,944,081 | 1 | 5 | null | 2021-09-08T15:13:03 | 2021-07-11T12:36:51 | Python | UTF-8 | Python | false | false | 8,407 | py | import cv2
import numpy as np
def transform_preds(coords, center, scale, output_size, use_udp=False):
    """Map heatmap-space keypoint coordinates back to image space.

    Args:
        coords (np.ndarray[K, ndims]): per-keypoint rows whose first two
            columns are (x, y); extra columns (scores/tags) are preserved
            by the ones_like initialization only in shape, not value.
        center (np.ndarray[2, ]): bounding-box center (x, y).
        scale (np.ndarray[2, ]): box scale wrt [width, height], normalized
            by a factor of 200.
        output_size (np.ndarray[2, ] | list(2,)): heatmap size (w, h).
        use_udp (bool): use unbiased data processing (divide by size - 1).

    Returns:
        np.ndarray: coordinates mapped into the original image.
    """
    assert coords.shape[1] in (2, 4, 5)
    assert len(center) == 2
    assert len(scale) == 2
    assert len(output_size) == 2

    # Undo the factor-of-200 normalization of the box scale.
    scale = scale * 200.0

    if use_udp:
        factor_x = scale[0] / (output_size[0] - 1.0)
        factor_y = scale[1] / (output_size[1] - 1.0)
    else:
        factor_x = scale[0] / output_size[0]
        factor_y = scale[1] / output_size[1]

    # Translate so the box center maps onto `center`.
    offset_x = center[0] - scale[0] * 0.5
    offset_y = center[1] - scale[1] * 0.5

    target_coords = np.ones_like(coords)
    target_coords[:, 0] = coords[:, 0] * factor_x + offset_x
    target_coords[:, 1] = coords[:, 1] * factor_y + offset_y
    return target_coords
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=(0., 0.),
                         inv=False):
    """Get the affine transform matrix, given the center/scale/rot/output_size.
    Args:
        center (np.ndarray[2, ]): Center of the bounding box (x, y).
        scale (np.ndarray[2, ]): Scale of the bounding box
            wrt [width, height].
        rot (float): Rotation angle (degree).
        output_size (np.ndarray[2, ] | list(2,)): Size of the
            destination heatmaps.
        shift (0-100%): Shift translation ratio wrt the width/height.
            Default (0., 0.).
        inv (bool): Option to inverse the affine transform direction.
            (inv=False: src->dst or inv=True: dst->src)
    Returns:
        np.ndarray: The transform matrix.
    """
    assert len(center) == 2
    assert len(scale) == 2
    assert len(output_size) == 2
    assert len(shift) == 2
    # pixel_std is 200.
    scale_tmp = scale * 200.0
    shift = np.array(shift)
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]
    rot_rad = np.pi * rot / 180
    # Second point: half the box width "above" the center, rotated by rot.
    src_dir = rotate_point([0., src_w * -0.5], rot_rad)
    dst_dir = np.array([0., dst_w * -0.5])
    # Three source points: box center, rotated offset point, and a third
    # point derived from them (cv2.getAffineTransform needs 3 pairs).
    src = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    src[2, :] = _get_3rd_point(src[0, :], src[1, :])
    # Matching destination points in the output (heatmap) frame.
    dst = np.zeros((3, 2), dtype=np.float32)
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
    dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    return trans
def _get_3rd_point(a, b):
"""To calculate the affine matrix, three pairs of points are required. This
function is used to get the 3rd point, given 2D points a & b.
The 3rd point is defined by rotating vector `a - b` by 90 degrees
anticlockwise, using b as the rotation center.
Args:
a (np.ndarray): point(x,y)
b (np.ndarray): point(x,y)
Returns:
np.ndarray: The 3rd point.
"""
assert len(a) == 2
assert len(b) == 2
direction = a - b
third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)
return third_pt
def rotate_point(pt, angle_rad):
    """Rotate a 2-D point about the origin.

    Args:
        pt (list[float]): 2 dimensional point to be rotated.
        angle_rad (float): rotation angle in radians.

    Returns:
        list[float]: the rotated point.
    """
    assert len(pt) == 2
    cos_a = np.cos(angle_rad)
    sin_a = np.sin(angle_rad)
    x, y = pt
    return [x * cos_a - y * sin_a, x * sin_a + y * cos_a]
def flip_back(output_flipped, target_type='GaussianHeatmap'):
    """Undo a horizontal test-time flip on predicted heatmaps.

    Args:
        output_flipped (np.ndarray[N, K, H, W]): heatmaps predicted on the
            horizontally flipped image.
        target_type (str): 'GaussianHeatmap' or 'CombinedTarget'; the latter
            carries 3 channels per keypoint, with channel 1 holding x offsets.

    Returns:
        np.ndarray[N, K, H, W]: heatmaps aligned with the un-flipped image.
    """
    assert output_flipped.ndim == 4, \
        'output_flipped should be [batch_size, num_keypoints, height, width]'
    shape_ori = output_flipped.shape
    channels = 1
    if target_type.lower() == 'CombinedTarget'.lower():
        channels = 3
        # Channel 1 of each triplet is the x offset; mirroring negates it.
        output_flipped[:, 1::3, ...] = -output_flipped[:, 1::3, ...]
    output_flipped = output_flipped.reshape(shape_ori[0], -1, channels,
                                            shape_ori[2], shape_ori[3])
    output_flipped_back = output_flipped.copy()
    # Swap left-right parts
    # NOTE(review): hard-coded pairs assume a specific keypoint layout
    # (indices 5-12 forming left/right limb pairs) -- confirm against the
    # model's keypoint definition before reuse.
    flip_pairs = [[5,6],[7,8],[9,10],[11,12]]
    for left, right in flip_pairs:
        output_flipped_back[:, left, ...] = output_flipped[:, right, ...]
        output_flipped_back[:, right, ...] = output_flipped[:, left, ...]
    output_flipped_back = output_flipped_back.reshape(shape_ori)
    # Flip horizontally
    output_flipped_back = output_flipped_back[..., ::-1]
    # Shift every row one pixel to the right (in-place, overlapping slices)
    # to compensate the one-pixel offset introduced by the flip.
    output_flipped_back[:, :, :, 1:] = output_flipped_back[:, :, :, :-1]
    return output_flipped_back
##########################################################################
def trans_affine(img, center, scale, rotation, size):
    """Warp ``img`` with the affine transform defined by center/scale/rotation
    into an output of the given ``size``."""
    warp_mat = get_affine_transform(center, scale, rotation, size)
    return cv2.warpAffine(img, warp_mat, size, flags=cv2.INTER_LINEAR)
def trans_reshape(img):
    """Convert an HWC image to CHW float16, scaled from [0, 255] to [0, 1]."""
    chw = np.transpose(img.astype(np.float16), (2, 0, 1))
    return chw / 255
def trans_normalize(img, mean, std):
    """Channel-wise normalize a CHW image: (img - mean) / std per channel."""
    # Reversing the axes puts channels last so mean/std broadcast over them.
    flipped = img.transpose()
    normalized = (flipped - np.array(mean)) / std
    return normalized.transpose()
def trans_expand(img):
    """Prepend a batch dimension of size 1."""
    return img[np.newaxis, ...]
######################################################################
def reformCoord(coords, bbox):
    """Map keypoint coordinates from the padded 256x256 crop back to the image.

    Inverse of ``resizeData``: removes the centering padding, rescales by the
    crop's resize factor and shifts by the bbox origin.

    Args:
        coords (np.ndarray[K, 3]): (x, y, score) rows in 256x256 crop space.
            The array is adjusted in place before rescaling.
        bbox (sequence[4]): (x0, y0, x1, y1) box in the original image.

    Returns:
        np.ndarray[K, 3]: coordinates in original-image space (score column
        is left unscaled).

    Raises:
        AssertionError: if the box has non-positive width or height.
    """
    x = int(bbox[0])
    y = int(bbox[1])
    w = int(bbox[2]) - x
    h = int(bbox[3]) - y
    assert w > 0 and h > 0
    if h > w:
        # Width was shrunk to 256*w/h and centered: remove the x padding.
        fx = w/h
        w_new = int(256 * fx)
        pad = int((256-w_new)/2)
        coords[:,0] -= pad
        coords = np.multiply(coords, [h/256, h/256, 1])
    else:
        # w >= h: height was shrunk and centered; remove the y padding.
        # Bug fix: the original used `if w > h`, so a square bbox (w == h)
        # skipped both branches and returned raw crop coordinates.
        fy = h/w
        h_new = int(256 * fy)
        pad = int((256-h_new)/2)
        coords[:, 1] -= pad
        coords = np.multiply(coords, [w / 256, w / 256, 1])
    coords = np.add(coords, [x, y, 0])
    return coords
def resizeData(img, bbox):
    # img (h, w, c)
    # Crop `img` to `bbox`, resize the long side to 256 and zero-pad the
    # short side so the result is exactly 256x256 (aspect ratio preserved).
    # Returns None if cropping/resizing fails (e.g. degenerate crop).
    """
    ['image_file', 'center', 'scale', 'bbox', 'rotation', 'joints_3d', 'joints_3d_visible', 'dataset', 'bbox_score', 'bbox_id', 'ann_info', 'img'])
    """
    x = int(bbox[0])
    y = int(bbox[1])
    x1 = int(bbox[2])
    y1 = int(bbox[3])
    w = x1-x
    h = y1-y
    assert w>0 and h>0
    img_clipped = img[y:y + h, x:x + w]
    # NOTE(review): the bare except below maps ANY failure (including genuine
    # bugs) to a None return -- consider narrowing to cv2.error/ValueError.
    try:
        if h > w:
            # Tall crop: resize height to 256, pad width symmetrically.
            fx = w / h
            w_new = int(256 * fx)
            pad = int((256 - w_new) / 2)
            img_resize = cv2.resize(img_clipped, dsize=(w_new, 256))
            img_pad = np.pad(img_resize, ((0, 0), (pad, 256 - w_new - pad), (0, 0)), 'constant', constant_values=0)
        else:
            # Wide (or square) crop: resize width to 256, pad height.
            fy = h / w
            h_new = int(256 * fy)
            pad = int((256 - h_new) / 2)
            img_resize = cv2.resize(img_clipped, dsize=(256, h_new))
            img_pad = np.pad(img_resize, ((pad, 256 - h_new - pad), (0, 0), (0, 0)), 'constant', constant_values=0)
        return img_pad
    except:
        return None
def compose(img, img_metas):
    """Run the full preprocessing chain on ``img`` using the first metadata
    entry and return ``(batch, horizontally-flipped batch)`` as float32."""
    meta = img_metas[0]
    out = trans_affine(img, meta['center'], meta['scale'], meta['rotation'],
                       meta['size'])
    out = trans_reshape(out)
    out = trans_normalize(out, mean=meta['mean'], std=meta['std'])
    out = trans_expand(out).astype(np.float32)
    return out, np.flip(out, 3)
| [
"songys96@naver.com"
] | songys96@naver.com |
6827cbf6518d397d25b1759ecbab6d3cf03cb7f1 | f2b4be9a933aa024a7934ab9758a0b29816e74d3 | /Interfaces/API/NewInterface/Tests/Test_DDSim.py | d81a22add8109fbab36d9483992e42f789235e2f | [] | no_license | hamzazafar/ILCDIRAC | 84c24a4b65e75d7df55f91c3601867cc45990ee6 | 6fa2b7b130b6248afeb7ae77d42502f2f72908aa | refs/heads/master | 2020-03-25T03:39:54.444975 | 2017-07-28T10:51:18 | 2017-11-23T14:02:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,345 | py | #!/usr/local/env python
"""
Test DDSim module
"""
import inspect
import unittest
from mock import create_autospec, patch, MagicMock as Mock
from DIRAC import gLogger, S_OK, S_ERROR
from ILCDIRAC.Interfaces.API.NewInterface.Applications import DDSim
from ILCDIRAC.Tests.Utilities.GeneralUtils import assertEqualsImproved, assertDiracFailsWith, \
assertDiracSucceeds
__RCSID__ = "$Id$"
MODULE_NAME = 'ILCDIRAC.Interfaces.API.NewInterface.Applications.DDSim'
gLogger.setLevel("DEBUG")
gLogger.showHeaders(True)
#pylint: disable=protected-access
class DDSimTestCase( unittest.TestCase ):
  """ Base class for the DDSim test cases
  """
  def setUp(self):
    """set up the objects"""
    # Fresh DDSim per test keeps _errorDict and prodparameters isolated.
    self.dds = DDSim( {} )
  # --- setter argument validation -----------------------------------------
  def test_setrandomseed( self ):
    self.assertFalse( self.dds._errorDict )
    self.dds.setRandomSeed( 89421 )
    self.assertFalse( self.dds._errorDict )
    assertEqualsImproved( self.dds.randomSeed, 89421, self )
  def test_setrandomseed_fails( self ):
    self.assertFalse( self.dds._errorDict )
    # A wrong argument type must be recorded by the _checkArgs machinery.
    self.dds.setRandomSeed( [ 'abc' ] )
    self.assertIn( '_checkArgs', self.dds._errorDict )
  def test_setstartfrom( self ):
    self.assertFalse( self.dds._errorDict )
    self.dds.setStartFrom( 89421 )
    self.assertFalse( self.dds._errorDict )
    assertEqualsImproved( self.dds.startFrom, 89421, self )
  def test_setstartfrom_fails( self ):
    self.assertFalse( self.dds._errorDict )
    self.dds.setStartFrom( 'adgiuj' )
    self.assertIn( '_checkArgs', self.dds._errorDict )
  # --- workflow / step wiring ---------------------------------------------
  def test_resolvelinkedparams( self ):
    step_mock = Mock()
    input_mock = Mock()
    input_mock.getType.return_value = { 'abc' : False }
    self.dds._linkedidx = 3
    self.dds._jobsteps = [ None, None, None, input_mock ]
    assertDiracSucceeds( self.dds._resolveLinkedStepParameters( step_mock ), self )
    step_mock.setLink.assert_called_once_with( 'InputFile', { 'abc' : False }, 'OutputFile' )
  def test_resolvelinkedparams_noinputstep( self ):
    self.dds._linkedidx = None
    self.dds._inputappstep = []
    assertDiracSucceeds( self.dds._resolveLinkedStepParameters( None ), self )
  def test_checkworkflow_app_missing( self ):
    self.dds._inputapp = [ 'some_depdency', 'unavailable_dependency_fail_on_this' ]
    self.dds._jobapps = [ 'myjobapp_1', 'some_dependency' ]
    assertDiracFailsWith( self.dds._checkWorkflowConsistency(), 'job order not correct', self )
  def test_checkworkflow_empty( self ):
    self.dds._inputapp = []
    self.dds._jobapps = []
    assertDiracSucceeds( self.dds._checkWorkflowConsistency(), self )
  def test_checkworkflow_success( self ):
    self.dds._inputapp = [ 'some_dependency', 'other_dependencies', 'many_more' ]
    self.dds._jobapps = [ 'ignore_me', 'many_more', 'some_dependency', 'other_dependencies' ]
    assertDiracSucceeds( self.dds._checkWorkflowConsistency(), self )
  def test_userjobmodules( self ):
    module_mock = Mock()
    assertDiracSucceeds( self.dds._userjobmodules( module_mock ), self )
  def test_prodjobmodules( self ):
    module_mock = Mock()
    assertDiracSucceeds( self.dds._prodjobmodules( module_mock ), self )
  def test_userjobmodules_fails( self ):
    with patch('%s._setUserJobFinalization' % MODULE_NAME, new=Mock(return_value=S_OK('something'))),\
        patch('%s._setApplicationModuleAndParameters' % MODULE_NAME, new=Mock(return_value=S_ERROR('some_test_err'))):
      assertDiracFailsWith( self.dds._userjobmodules( None ),
                            'userjobmodules failed', self )
  def test_prodjobmodules_fails( self ):
    with patch('%s._setApplicationModuleAndParameters' % MODULE_NAME, new=Mock(return_value=S_OK('something'))), \
        patch('%s._setOutputComputeDataList' % MODULE_NAME, new=Mock(return_value=S_ERROR('some_other_test_err'))):
      assertDiracFailsWith( self.dds._prodjobmodules( None ),
                            'prodjobmodules failed', self )
  # --- consistency checks -------------------------------------------------
  def test_checkconsistency( self ):
    self.dds.version = '134'
    self.dds.detectorModel = 'mymodel.det'
    self.dds.outputFile = 'myoutput.file'
    self.dds._jobtype = 'User'
    assertDiracSucceeds( self.dds._checkConsistency( Mock() ), self )
    # User jobs must not register production outputs/parameters.
    self.assertNotIn( { 'outputFile' : '@{OutputFile}', 'outputPath' : '@{OutputPath}',
                        'outputDataSE' : '@{OutputSE}' }, self.dds._listofoutput )
    self.assertNotIn( 'nbevts', self.dds.prodparameters )
    self.assertNotIn( 'Process', self.dds.prodparameters )
  def test_checkconsistency_nodetectormodel( self ):
    self.dds.version = 123
    self.dds.steeringFile = None
    self.dds.detectorModel = None
    assertDiracFailsWith( self.dds._checkConsistency( Mock() ), 'no detectormodel set', self )
  def test_checkconsistency_noversion( self ):
    self.dds.version = None
    assertDiracFailsWith( self.dds._checkConsistency( Mock() ), 'no version found', self )
  def test_checkconsistency_existsfails( self ):
    self.dds.version = '134'
    self.dds.steeringFile = 'mysteer.file'
    with patch('os.path.exists', new=Mock(return_value=False)), \
         patch.object(inspect.getmodule(DDSim), 'Exists', new=Mock(return_value=S_ERROR('testerr_exists_mock'))):
      assertDiracFailsWith( self.dds._checkConsistency( Mock() ), 'testerr_exists_mock', self )
  def test_checkconsistency_userjob( self ):
    self.dds.version = '134'
    self.dds.steeringFile = 'mysteer.file'
    self.dds._jobtype = 'notUser'
    self.dds.detectorModel = 'myDetectorv200'
    # steeringFile exists locally, so the catalog Exists() check is skipped.
    with patch('os.path.exists', new=Mock(return_value=True)), \
         patch.object(inspect.getmodule(DDSim), 'Exists', new=Mock(return_value=S_ERROR('testerr_exists_mock'))):
      assertDiracSucceeds( self.dds._checkConsistency( Mock() ), self )
      self.assertIn( { 'outputFile' : '@{OutputFile}', 'outputPath' : '@{OutputPath}',
                       'outputDataSE' : '@{OutputSE}' }, self.dds._listofoutput )
      for keyword in [ 'detectorType', 'slic_detectormodel' ]:
        self.assertIn( keyword, self.dds.prodparameters )
  def test_checkconsistency_userjob_notdetmodel( self ):
    self.dds.version = '134'
    self.dds.steeringFile = 'mysteer.file'
    self.dds._jobtype = 'notUser'
    self.dds.detectorModel = True
    self.dds.setStartFrom( 148 )
    with patch('os.path.exists', new=Mock(return_value=False)), \
         patch.object(inspect.getmodule(DDSim), 'Exists', new=Mock(return_value=S_OK())):
      assertDiracSucceeds( self.dds._checkConsistency( Mock() ), self )
      self.assertIn( { 'outputFile' : '@{OutputFile}', 'outputPath' : '@{OutputPath}',
                       'outputDataSE' : '@{OutputSE}' }, self.dds._listofoutput )
      for keyword in [ 'detectorType', 'slic_detectormodel' ]:
        self.assertIn( keyword, self.dds.prodparameters )
#pylint: disable=protected-access
class TestDDSim( unittest.TestCase ):
  """tests for the DDSim interface"""
  def setUp( self ):
    pass
  def tearDown( self ):
    """cleanup any files"""
    pass
  # getKnownDetectorModels is patched per test so no configuration-service
  # lookup is performed.
  @patch( "ILCDIRAC.Interfaces.API.NewInterface.Applications.DDSim.getKnownDetectorModels",
          new = Mock(return_value=S_OK({'CLIC_o2_v03':"/some/path"})))
  def test_setDetectorModel1( self ):
    """test DDSIm setDetectorModel part of software................................................."""
    detModel = "CLIC_o2_v03"
    ddsim = DDSim()
    ddsim.setDetectorModel( detModel )
    self.assertEqual( ddsim.detectorModel, detModel )
  @patch( "ILCDIRAC.Interfaces.API.NewInterface.Applications.DDSim.getKnownDetectorModels",
          new = Mock(return_value=S_ERROR("No known models")))
  def test_setDetectorModel2( self ):
    """test DDSIm setDetectorModel part of software failure........................................."""
    detModel = "CLIC_o2_v03"
    ddsim = DDSim()
    res = ddsim.setDetectorModel( detModel )
    self.assertEqual( res['Message'], "No known models" )
  @patch( "ILCDIRAC.Interfaces.API.NewInterface.Applications.DDSim.getKnownDetectorModels",
          new = Mock(return_value=S_OK({'CLIC_o2_v04':"/some/path"})))
  def test_setDetectorModel3( self ):
    """test DDSIm setDetectorModel is not known....................................................."""
    detModel = "ATLAS"
    ddsim = DDSim()
    ret = ddsim.setDetectorModel( detModel )
    self.assertEqual( ddsim.detectorModel, '' )
    self.assertFalse( ret['OK'] )
    self.assertIn( "Unknown detector model in ddsim: ATLAS", ret['Message'] )
  # --- tarball / LFN detector models --------------------------------------
  @patch( "os.path.exists", new = Mock(return_value=True ) )
  def test_setDetectorModel_TB_success( self ):
    """test DDSIm setDetectorModel tarBall success.................................................."""
    detModel = "CLIC_o2_v03"
    ext = ".tar.gz"
    ddsim = DDSim()
    ddsim.setDetectorModel( detModel+ext )
    self.assertEqual( ddsim.detectorModel, detModel )
    # A local tarball must be shipped in the input sandbox.
    self.assertTrue( detModel+ext in ddsim.inputSB )
  @patch( "os.path.exists", new = Mock(return_value=False))
  def test_setDetectorModel_TB_notLocal( self ):
    """test DDSIm setDetectorModel tarBall notLocal................................................."""
    detModel = "CLIC_o2_v03"
    ext = ".tgz"
    ddsim = DDSim()
    ddsim.setDetectorModel( detModel+ext )
    # Non-local tarball: model name is kept but nothing goes to the sandbox.
    self.assertEqual( ddsim.inputSB, [] )
    self.assertEqual( ddsim.detectorModel, detModel )
  def test_setDetectorModel_LFN_succcess( self ):
    """test DDSIm setDetectorModel lfn success......................................................"""
    detModel = "lfn:/ilc/user/s/sailer/CLIC_o2_v03.tar.gz"
    ddsim = DDSim()
    ddsim.setDetectorModel( detModel )
    self.assertEqual( ddsim.detectorModel, "CLIC_o2_v03" )
    self.assertTrue( detModel in ddsim.inputSB )
  def test_setStartFrom1( self ):
    """test DDSIm setStartFrom 1...................................................................."""
    ddsim = DDSim()
    ddsim.setStartFrom( "Arg")
    self.assertTrue( ddsim._errorDict )
  def test_setStartFrom2( self ):
    """test DDSIm setStartFrom 2...................................................................."""
    ddsim = DDSim()
    ddsim.setStartFrom( 42 )
    self.assertEqual( ddsim.startFrom, 42 )
  def test_getKnownDetModels1( self ):
    """test getKnownDetectorModels failure no version..............................................."""
    ddsim = DDSim()
    ret = ddsim.getKnownDetectorModels()
    self.assertFalse( ret['OK'] )
    self.assertEqual( "No software version defined", ret['Message'] )
  def test_getKnownDetModels2( self ):
    """test getKnownDetectorModels success.........................................................."""
    ddsim = DDSim()
    ddsim.version = "test"
    import DIRAC
    # Replace the Operations helper so no configuration system is contacted.
    ddsim._ops = create_autospec(DIRAC.ConfigurationSystem.Client.Helpers.Operations.Operations, spec_set=True)
    ddsim._ops.getOptionsDict.return_value = S_OK({"detModel1":"/path", "detModel2":"/path2"})
    ret = ddsim.getKnownDetectorModels()
    self.assertIn( "detModel1", ret['Value'] )
    self.assertTrue( ret['OK'] )
def runTests():
  """Run both DDSim test suites and print their results.

  Uses print() calls (valid on both Python 2 and 3) instead of the bare
  Python-2-only `print testResult` statements, which are a SyntaxError
  under Python 3.
  """
  suite = unittest.defaultTestLoader.loadTestsFromTestCase( TestDDSim )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
  print(testResult)
  suite = unittest.defaultTestLoader.loadTestsFromTestCase( DDSimTestCase )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
  print(testResult)
if __name__ == '__main__':
runTests()
| [
"andre.philippe.sailer@cern.ch"
] | andre.philippe.sailer@cern.ch |
7f86b7ad0d656cca5c68cfeed92a6778b9b6ca30 | 19380754401eebfb890da93ba5eb216cf776ba82 | /fabrik/flowlabel.py | af7bbf8713288025ddd2e6c4104f5a2816a8b7c0 | [
"Apache-2.0"
] | permissive | viccro/diarc | 2693734fa92a7b4e98bc0d97ce6cbc85694a0476 | 5b8d18fe08ce175109e9c8e72a3d29c4078cd77a | refs/heads/master | 2021-01-14T10:44:47.104726 | 2015-08-06T18:15:14 | 2015-08-06T18:15:14 | 38,066,704 | 0 | 0 | null | 2015-06-25T18:30:46 | 2015-06-25T18:30:46 | null | UTF-8 | Python | false | false | 1,392 | py | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" flowlabels are abbreviations that can be used to identify a flow. Flows do
not have a single unique attribute, which makes them difficult to identify.
flows solve that problem.
flowlabels have 2 parts:
origin node index
destination node index
Example:
flowlabel 1_2 means the flow from the node at index 1 to the node at index 2
"""
import re
def parse_flowlabel(flowlabel):
    """ Parses a flowlabel into a tuple

    Args:
        flowlabel (str): label of the form "<origin>_<destination>", e.g. "1_2".

    Returns:
        tuple: (origin node index, destination node index) as ints.

    Raises:
        Exception: if flowlabel does not match the expected format.
    """
    # Raw string: "\d" in a non-raw literal is an invalid escape sequence
    # (DeprecationWarning today, SyntaxError in future Python versions).
    result = re.findall(r"(^\d+)(_)(\d+$)", flowlabel)
    if len(result) == 0:
        raise Exception("Invalid flowlabel %s"%flowlabel)
    return (int(result[0][0]), int(result[0][2]))
def gen_flowlabel(origin_index, destination_index):
    """Build the label identifying the flow from *origin_index* to
    *destination_index* (e.g. ``gen_flowlabel(1, 2) -> "1_2"``)."""
    label = "%d_%d"%(origin_index, destination_index)
    return label
| [
"amber.crosson@nytimes.com"
] | amber.crosson@nytimes.com |
cbd4324d8911038723a6c833cf2b4120d84a6c32 | 2d3a9a5ca9b15f4fc3edc443bf885f43421e31f4 | /done/db1.py | d80edea0dbc4f146d8eed4c5371e08a6c1db840c | [] | no_license | takao-fujiwara/mypy | 913ab4bdb80a97fe2be33d1615e4ee18c062578e | 93c0f8c7026278540fe84d7d590af41f032a0e9b | refs/heads/master | 2020-05-21T04:27:03.079300 | 2018-10-15T07:17:58 | 2018-10-15T07:17:58 | 47,017,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
# Minimal SQLAlchemy demo: in-memory SQLite engine, one table, declarative base.
engine = create_engine('sqlite://', echo=True)
# print() works on both Python 2 and 3; the bare `print engine` statement
# was Python-2-only syntax.
print(engine)
metadata = MetaData()
metadata.bind = engine
menus = Table(
    'menus', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String),
    Column('kcal', Integer)
)
from sqlalchemy.ext import declarative
Base = declarative.declarative_base()
| [
"cafetakao@gmail.com"
] | cafetakao@gmail.com |
b1ba433d509d020677e93798474154d142ff323a | 8588203926686503d233a7cf4128a9d179cb3e37 | /proyectos/chapter4b/adapters/repository_order_line.py | f31c2b82a639073689c13106a61a74eccddc06ed | [] | no_license | jlbali/python_architecture | bc615dbe978c55a2c8a5929f004411a0a72d6243 | e5a580e8a18fa2055d2f8a07bbaa3b98b06b83bb | refs/heads/master | 2023-02-06T11:54:09.819914 | 2020-12-26T00:13:36 | 2020-12-26T00:13:36 | 324,174,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,395 | py | import abc
from domain.order_line import OrderLine
from typing import List, Optional
class AbstractOrderLineRepository(abc.ABC):
    """Port (interface) for persisting and querying OrderLine entities.

    Concrete adapters (SQLAlchemy-backed, in-memory fake, ...) implement
    all six operations below.
    """
    @abc.abstractmethod
    def add(self, item: OrderLine):
        """Stage a new order line for persistence."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_by_ref(self, reference:str) -> OrderLine:
        """Return the order line with the given reference."""
        raise NotImplementedError

    @abc.abstractmethod
    def get(self, id:int) -> OrderLine:
        """Return the order line with the given id."""
        raise NotImplementedError

    @abc.abstractmethod
    def list(self) -> List[OrderLine]:
        """Return all stored order lines."""
        raise NotImplementedError

    @abc.abstractmethod
    def delete(self, id:int):
        """Remove the order line with the given id."""
        raise NotImplementedError

    @abc.abstractmethod
    def delete_by_ref(self, reference:str):
        """Remove the order line with the given reference."""
        raise NotImplementedError
class SqlAlchemyOrderLineRepository(AbstractOrderLineRepository):
    """OrderLine repository backed by a SQLAlchemy session.

    Methods only stage changes on the session; committing is the caller's
    responsibility (unit-of-work pattern).
    """
    def __init__(self, session):
        # session: a SQLAlchemy Session (or compatible) object.
        self.session = session

    def add(self, item: OrderLine):
        self.session.add(item)

    def get_by_ref(self, reference:str) -> OrderLine:
        # .one() raises if zero or more than one row matches the reference.
        return self.session.query(OrderLine).filter_by(ref=reference).one()

    def get(self, id:int) -> OrderLine:
        # Session.get-by-primary-key; returns None when the id is unknown.
        return self.session.query(OrderLine).get(id)

    def list(self) -> List[OrderLine]:
        return self.session.query(OrderLine).all()

    def delete(self, id:int):
        # Bulk delete; does not raise when no row matches.
        self.session.query(OrderLine).filter(OrderLine.id==id).delete()

    def delete_by_ref(self, reference:str):
        self.session.query(OrderLine).filter(OrderLine.ref==reference).delete()
class FakeOrderLineRepository(AbstractOrderLineRepository):
    """In-memory repository double for tests; assigns sequential ids."""

    def __init__(self, items: Optional[List[OrderLine]] = None):
        # Bug fix: the previous `items=[]` default was a mutable default
        # argument (one shared list across all calls). None is the safe
        # sentinel; passing a list behaves exactly as before.
        self._current_id = 1
        self._items: List[OrderLine] = []
        for item in (items or []):
            self.add(item)

    def add(self, item: OrderLine):
        """Store *item*, stamping it with the next sequential id."""
        item.id = self._current_id
        self._items.append(item)
        self._current_id += 1

    def get(self, id: int) -> OrderLine:
        """Return the item with this id; raises StopIteration if absent."""
        return next(item for item in self._items if item.id == id)

    def get_by_ref(self, reference: str) -> OrderLine:
        """Return the item with this ref; raises StopIteration if absent."""
        return next(item for item in self._items if item.ref == reference)

    def list(self) -> List[OrderLine]:
        """Return the backing list (callers should treat it as read-only)."""
        return self._items

    def delete(self, id: int):
        """Remove the item with this id; no-op when absent."""
        self._items = [item for item in self._items if not item.id == id]

    def delete_by_ref(self, reference: str):
        """Remove the item with this reference; no-op when absent."""
        self._items = [item for item in self._items if not item.ref == reference]
| [
"juan.lucas.bali@gmail.com"
] | juan.lucas.bali@gmail.com |
3bee51e9e68133d4651c5816abba2c27197fc326 | 678c82b0436a707436f828532afdf0f6116190d5 | /publication/models.py | 55a36c5c5fb1750b6d880b41c7413cea8bf4926c | [] | no_license | fabioh80/play | 2f7f000a55130b926d7d95fa18583c375dc83504 | c6198b5de9d606f2e854cb3de6418f9ecc5d9e06 | refs/heads/master | 2021-01-12T14:41:06.683152 | 2016-10-27T17:25:04 | 2016-10-27T17:25:04 | 72,050,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | from django.db import models
from datetime import datetime
class Publication(models.Model):
    """A posted invitation to play a game at a place on a given date."""

    # Pass the callable `datetime.now` (not `datetime.now()`): calling it in
    # the field definition evaluates ONCE at import time and every row would
    # share that stale timestamp.
    pub_date = models.DateTimeField(default=datetime.now, blank=False)
    place = models.ForeignKey("Place", related_name='place')
    type_play = models.ForeignKey("Type_Play", related_name='play')
    # DecimalField requires decimal_places; these are whole-number counts.
    cnt_player = models.DecimalField(max_digits=2, decimal_places=0)
    cnt_min_player = models.DecimalField(max_digits=2, decimal_places=0)
    description = models.TextField(max_length=300, default='Insira aqui informações sobre o jogo.')
    # Was DecimalField(default=datetime.now()), which can never validate a
    # datetime value; this field holds a date/time, so store it as one.
    pub_max_date = models.DateTimeField(default=datetime.now, blank=False)

    class Meta:
        ordering = ['-pub_date']
        verbose_name = 'publication'
        verbose_name_plural = 'publications'

    def __str__(self):
        # pub_date is a datetime and place a model instance; concatenating
        # them with '+' raised TypeError. Format them as strings instead.
        return '{}-{}'.format(self.pub_date, self.place)
| [
"fabioh80@gmail.com"
] | fabioh80@gmail.com |
55bd1e9c03901d04fa75221fbb2e004339501afe | c3379fb707daf434fc731006e173da817b68ca75 | /pydatview/fast/runner.py | f321a81d662cb9f3714995b306df20a08da376e1 | [
"MIT"
] | permissive | dviewtest/pyDatView | 43cb6d2bb76a78670ecd1083495024f935bc9e9b | 3516ffaff601c122d62ffc94abd842958354ece8 | refs/heads/master | 2023-06-27T11:08:52.056689 | 2021-06-23T17:57:35 | 2021-06-23T17:57:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,304 | py | # --- For cmd.py
from __future__ import division, print_function
import os
import subprocess
import multiprocessing
import collections
import glob
import pandas as pd
import numpy as np
import shutil
import stat
import re
# --- Fast libraries
from weio.weio.fast_input_file import FASTInputFile
from weio.weio.fast_output_file import FASTOutputFile
# from pyFAST.input_output.fast_input_file import FASTInputFile
# from pyFAST.input_output.fast_output_file import FASTOutputFile
FAST_EXE='openfast'
# --------------------------------------------------------------------------------}
# --- Tools for executing FAST
# --------------------------------------------------------------------------------{
# --- START cmd.py
def run_cmds(inputfiles, exe, parallel=True, showOutputs=True, nCores=None, showCommand=True):
    """ Run a set of simple commands of the form `exe input_file`
    By default, the commands are run in "parallel" (though the method needs to be improved)
    The stdout and stderr may be displayed on screen (`showOutputs`) or hidden.
    A better handling is yet required.

    Returns True when every command exited with code 0, False otherwise.
    """
    Failed=[]
    def _report(p):
        # Print one status line per finished process; collect failures.
        if p.returncode==0:
            print('[ OK ] Input : ',p.input_file)
        else:
            Failed.append(p)
            print('[FAIL] Input : ',p.input_file)
            print('       Directory: '+os.getcwd())
            print('       Command : '+p.cmd)
            print('       Use `showOutputs=True` to debug, or run the command above.')
            #out, err = p.communicate()
            #print('StdOut:\n'+out)
            #print('StdErr:\n'+err)
    ps=[]
    iProcess=0
    if nCores is None:
        nCores=multiprocessing.cpu_count()
    if nCores<0:
        # Negative nCores means "no limit": everything in one chunk.
        nCores=len(inputfiles)+1
    for i,f in enumerate(inputfiles):
        #print('Process {}/{}: {}'.format(i+1,len(inputfiles),f))
        # wait=(not parallel): sequential mode blocks on each command.
        ps.append(run_cmd(f, exe, wait=(not parallel), showOutputs=showOutputs, showCommand=showCommand))
        iProcess += 1
        # waiting once we've filled the number of cores
        # TODO: smarter method with proper queue, here processes are run by chunks
        if parallel:
            if iProcess==nCores:
                for p in ps:
                    p.wait()
                for p in ps:
                    _report(p)
                ps=[]
                iProcess=0
    # Extra process if not multiptle of nCores (TODO, smarter method)
    for p in ps:
        p.wait()
    for p in ps:
        _report(p)
    # --- Giving a summary
    if len(Failed)==0:
        print('[ OK ] All simulations run successfully.')
        return True
    else:
        print('[FAIL] {}/{} simulations failed:'.format(len(Failed),len(inputfiles)))
        for p in Failed:
            print('      ',p.input_file)
        return False
def run_cmd(input_file_or_arglist, exe, wait=True, showOutputs=False, showCommand=True):
    """ Run a simple command of the form `exe input_file` or `exe arg1 arg2`

    Args:
        input_file_or_arglist: path of a single input file, or a list of
            arguments passed to `exe`.
        exe: path to the executable. Existence is only verified in the
            single-input-file form (list arguments are trusted, as before).
        wait: if True, block until completion; if False, return immediately.
        showOutputs: if False, stdout/stderr are discarded.
        showCommand: if True, print the command line before running.

    Returns:
        An object with `returncode` set (wait=True) or the running
        `subprocess.Popen` (wait=False); in both cases the attributes
        `cmd`, `args`, `input_file`, `input_file_abs` and `exe` are
        attached for later reporting.

    Raises:
        Exception: if `exe` does not exist (single-input-file form only).
    """
    # TODO Better capture STDOUT
    if isinstance(input_file_or_arglist, list):
        args= [exe] + input_file_or_arglist
        input_file = ' '.join(input_file_or_arglist)
        input_file_abs = input_file
    else:
        input_file=input_file_or_arglist
        if not os.path.isabs(input_file):
            input_file_abs=os.path.abspath(input_file)
        else:
            input_file_abs=input_file
        if not os.path.exists(exe):
            raise Exception('Executable not found: {}'.format(exe))
        args= [exe,input_file]
    #args = 'cd '+workDir+' && '+ exe +' '+basename
    shell=False
    if showOutputs:
        STDOut= None
    else:
        # Bug fix: `open(os.devnull, 'w')` leaked one file handle per call
        # (it was never closed); subprocess.DEVNULL needs no cleanup.
        STDOut= subprocess.DEVNULL
    if showCommand:
        print('Running: '+' '.join(args))
    if wait:
        class Dummy():
            # Minimal stand-in so both branches return an object with the
            # same reporting attributes.
            pass
        p=Dummy()
        p.returncode=subprocess.call(args , stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)
    else:
        p=subprocess.Popen(args, stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)
    # Storing some info into the process
    p.cmd            = ' '.join(args)
    p.args           = args
    p.input_file     = input_file
    p.input_file_abs = input_file_abs
    p.exe            = exe
    return p
# --- END cmd.py
def run_fastfiles(fastfiles, fastExe=None, parallel=True, showOutputs=True, nCores=None, showCommand=True, reRun=True):
    """Run a list of FAST input files through the FAST executable.

    When `reRun` is False, files whose `.out`/`.outb` output already exists
    next to them are skipped. Remaining arguments are forwarded to run_cmds.
    Returns True when every (non-skipped) simulation succeeded.
    """
    if fastExe is None:
        fastExe=FAST_EXE
    if not reRun:
        # Figure out which files exist
        newfiles=[]
        for f in fastfiles:
            base=os.path.splitext(f)[0]
            if os.path.exists(base+'.outb') or os.path.exists(base+'.out'):
                print('>>> Skipping existing simulation for: ',f)
                pass
            else:
                newfiles.append(f)
        fastfiles=newfiles
    return run_cmds(fastfiles, fastExe, parallel=parallel, showOutputs=showOutputs, nCores=nCores, showCommand=showCommand)
def run_fast(input_file, fastExe=None, wait=True, showOutputs=False, showCommand=True):
    """Run one FAST simulation; thin wrapper around run_cmd with the
    module-level default executable."""
    exe = FAST_EXE if fastExe is None else fastExe
    return run_cmd(input_file, exe, wait=wait, showOutputs=showOutputs, showCommand=showCommand)
def writeBatch(batchfile, fastfiles, fastExe=None):
    """ Write batch file, everything is written relative to the batch file"""
    if fastExe is None:
        fastExe = FAST_EXE
    batch_dir = os.path.dirname(os.path.abspath(batchfile))
    exe_rel = os.path.relpath(os.path.abspath(fastExe), batch_dir)
    # One "<exe> <input>" line per FAST file, paths relative to the batch dir.
    lines = [
        '{} {}\n'.format(exe_rel, os.path.relpath(os.path.abspath(ff), batch_dir))
        for ff in fastfiles
    ]
    with open(batchfile, 'w') as fout:
        fout.writelines(lines)
def removeFASTOuputs(workDir):
    """Delete FAST output artifacts (*.out, *.outb, *.ech, *.sum) from workDir."""
    for ext in ('out', 'outb', 'ech', 'sum'):
        for fpath in glob.glob(os.path.join(workDir, '*.' + ext)):
            os.remove(fpath)
if __name__=='__main__':
run_cmds(['main1.fst','main2.fst'], './Openfast.exe', parallel=True, showOutputs=False, nCores=4, showCommand=True)
pass
# --- Test of templateReplace
| [
"elmanuelito.github@gmail.com"
] | elmanuelito.github@gmail.com |
e54a3d7c4412e2993a3e50aea8f2ee52b2bc1a38 | 088e0c83bb0467f54ad6a481ce77e03bc9f32515 | /venv/Scripts/pip3-script.py | 106abf0b818cb42802790b83b774548aea6fd553 | [] | no_license | Nicole-peng/gy-1908A | 33bac0f7f5d3a2fddc2561807135d614e07c37a7 | c376359ade7c1844410f4b712ba8fbefec8bc9c2 | refs/heads/master | 2022-02-27T03:22:59.922174 | 2019-09-09T02:54:19 | 2019-09-09T02:54:19 | 207,207,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | #!D:\softwareData\python\1908-A\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"pengyijieyiyi@foxmail.com"
] | pengyijieyiyi@foxmail.com |
0480e0b8c2d156438f5e163074ef17c3dfd8dbe5 | 90cbd3e71e0f6a5bc5175a1bafbc981c0ca64060 | /homework/hw2/Hu_hillary.py | 1ad46d01e702523d6f0d04e1abc7fb367657131f | [] | no_license | sammo3182/POLI7002-TextAsData | ef888f5ed9728bd11783cc5d5cb1febe88c6be9c | 389e13dbee07f747a02c7b8f306dead1d49379c8 | refs/heads/master | 2020-12-06T01:17:14.174854 | 2016-11-01T00:44:07 | 2016-11-01T00:44:07 | 66,304,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,987 | py | # Title: Homework2_Hillary
# Author: Yue Hu
# Environment: Win 10, Python 3.5
# Purpose: The assignment is a project using the New York Time API to scrape text data.
## module preload
import requests # imported the `requests` package for request url later.
import csv # for saving the results into csv files.
# ## Data scriping
# content = "Hilary" # content to search
# dateStart = "20160101" # starting date
# dateEnd = "20161231" #ending date
# apiKey = "951312b93d9e42d8b16c699a130fa5ef"
# url = "http://api.nytimes.com/svc/search/v2/articlesearch.json?fq=" + content + "&page=1&begin_date=" + dateStart + "&end_date=" + dateEnd + "&api-key=" + apiKey
# print("The URL is ", url, "\n") # check the url output
# # http://api.nytimes.com/svc/search/v2/articlesearch.json?fq=obamacare&page=1&begin_date=20160101&end_date=20161231&api-key=951312b93d9e42d8b16c699a130fa5ef
# response = requests.get(url)
# data = response.json()
# doc_num = 1
# print("Dictionary keys are ", data["response"]["docs"][doc_num].keys())
# ## Draw specific info from the data
# # print(data["response"]["docs"][1])
# # print(data["response"]["docs"][doc_num]["keywords"][0])
# # print("The wordcount of the article ", doc_num,": ", data["response"]["docs"][doc_num]["wordcount"])
## Save the results
keep_going = True # set the trigger when the while loop ends.
page_num = 1 # start scrapping from the first page
doc_total = 10 # found from the previous checks
def keywordScan(lim):
    """Return the first ``lim`` keyword values of the article currently being
    processed.  Relies on the module-level ``data`` (last API response) and
    ``doc_num`` (index of the current document) set by the main loop."""
    keyword_entries = data["response"]["docs"][doc_num]["keywords"]
    return [keyword_entries[idx]["value"] for idx in range(lim)]
# Page through NYT Article Search results and append one CSV row per article.
while keep_going:
    print(page_num)  # progress: which results page we are on
    content = "Hillary"  # query term
    dateStart = "20160101"  # search window start (YYYYMMDD)
    dateEnd = "20160131"  # search window end (YYYYMMDD)
    apiKey = "951312b93d9e42d8b16c699a130fa5ef"
    url = ("http://api.nytimes.com/svc/search/v2/articlesearch.json?fq=" + content +
           "&page=" + str(page_num) + "&begin_date=" + dateStart +
           "&end_date=" + dateEnd + "&api-key=" + apiKey)
    response = requests.get(url)
    data = response.json()
    docs = data["response"]["docs"]
    if len(docs) == 0:
        # No more results: stop paging.
        keep_going = False
    else:
        page_num += 1
        doc_total = len(docs)
        for doc_num in range(doc_total):
            print(doc_num, "/", doc_total)  # trace progress within the page
            doc = docs[doc_num]
            # Publication date.
            pub_date = doc["pub_date"]
            if not pub_date:
                pub_date = "NA"  # BUGFIX: was `pub_date == "NA"` (comparison, had no effect)
            headline = doc["headline"]["main"]
            if not headline:
                headline = "NA"  # BUGFIX: was a no-op comparison
            else:
                headline = headline.encode("utf-8", "ignore")
            # BUGFIX: initialize byline so a list-valued byline can neither leak
            # the previous article's value nor raise NameError on the first one.
            byline = "NA"
            if isinstance(doc["byline"], dict):
                byline = doc["byline"]["original"].encode("utf-8")
                if not byline:
                    byline = "NA"
            lead_paragraph = doc["lead_paragraph"]
            if not lead_paragraph:
                lead_paragraph = "NA"  # BUGFIX: was a no-op comparison
            else:
                lead_paragraph = lead_paragraph.encode("utf-8", "ignore")
            word_count = doc["word_count"]
            if not word_count:
                word_count = "NA"  # BUGFIX: len() on an integer raised TypeError
            key_len = len(doc["keywords"])
            if key_len == 0:
                keywords = "NA"
            elif key_len == 1:
                keywords = doc["keywords"][0]["value"]
            else:
                keywords = keywordScan(2)  # keep at most two keywords
            # Append the record to the output file.
            row = [pub_date, headline, byline, lead_paragraph, word_count, keywords]
            with open("./clinton.csv", "a") as my_csv:  # "a" means append
                csv.writer(my_csv).writerow(row)
| [
"sammo3182@sina.com"
] | sammo3182@sina.com |
4570a4f1135d69481df51ef52485f7fe43b9827d | 430bd23decf16dc572a587b7af9f5c8e7dea5e6b | /clients/python/swagger_client/models/funding.py | ff0870b285368906885d8379191500364f1d06c3 | [
"Apache-2.0"
] | permissive | jltrade/api-connectors | 332d4df5e7e60bd27b6c5a43182df7d99a665972 | fa2cf561b414e18e9d2e1b5d68e94cc710d315e5 | refs/heads/master | 2020-06-19T10:20:46.022967 | 2016-09-24T13:12:17 | 2016-09-24T13:12:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,097 | py | # coding: utf-8
"""
BitMEX API
REST API for the BitMEX.com trading platform.<br><br><a href=\"/app/restAPI\">REST Documentation</a><br><a href=\"/app/wsAPI\">Websocket Documentation</a>
OpenAPI spec version: 1.2.0
Contact: support@bitmex.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class Funding(object):
    """
    Swagger model for a BitMEX funding-rate record.
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, timestamp=None, symbol=None, funding_interval=None, funding_rate=None, funding_rate_daily=None):
        """
        Funding - a model defined in Swagger
        :param timestamp: funding timestamp (date)
        :param symbol: instrument symbol (str)
        :param funding_interval: interval between funding events (date)
        :param funding_rate: funding rate for the interval (float)
        :param funding_rate_daily: funding rate scaled to one day (float)
        """
        # Attribute name -> swagger type; drives the generic to_dict() logic.
        self.swagger_types = {
            'timestamp': 'date',
            'symbol': 'str',
            'funding_interval': 'date',
            'funding_rate': 'float',
            'funding_rate_daily': 'float'
        }
        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'timestamp': 'timestamp',
            'symbol': 'symbol',
            'funding_interval': 'fundingInterval',
            'funding_rate': 'fundingRate',
            'funding_rate_daily': 'fundingRateDaily'
        }
        self._timestamp = timestamp
        self._symbol = symbol
        self._funding_interval = funding_interval
        self._funding_rate = funding_rate
        self._funding_rate_daily = funding_rate_daily
    @property
    def timestamp(self):
        """Gets the timestamp of this Funding.  :rtype: date"""
        return self._timestamp
    @timestamp.setter
    def timestamp(self, timestamp):
        """Sets the timestamp of this Funding."""
        self._timestamp = timestamp
    @property
    def symbol(self):
        """Gets the symbol of this Funding.  :rtype: str"""
        return self._symbol
    @symbol.setter
    def symbol(self, symbol):
        """Sets the symbol of this Funding."""
        self._symbol = symbol
    @property
    def funding_interval(self):
        """Gets the funding_interval of this Funding.  :rtype: date"""
        return self._funding_interval
    @funding_interval.setter
    def funding_interval(self, funding_interval):
        """Sets the funding_interval of this Funding."""
        self._funding_interval = funding_interval
    @property
    def funding_rate(self):
        """Gets the funding_rate of this Funding.  :rtype: float"""
        return self._funding_rate
    @funding_rate.setter
    def funding_rate(self, funding_rate):
        """Sets the funding_rate of this Funding."""
        self._funding_rate = funding_rate
    @property
    def funding_rate_daily(self):
        """Gets the funding_rate_daily of this Funding.  :rtype: float"""
        return self._funding_rate_daily
    @funding_rate_daily.setter
    def funding_rate_daily(self, funding_rate_daily):
        """Sets the funding_rate_daily of this Funding."""
        self._funding_rate_daily = funding_rate_daily
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # IMPROVED: dict.items() works on both Python 2 and 3, so the
        # six.iteritems dependency is no longer needed in this class.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # BUGFIX: comparing with a non-Funding value (e.g. an int) used to
        # raise AttributeError on `other.__dict__`; now it is simply unequal.
        if not isinstance(other, Funding):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"samuel.trace.reed@gmail.com"
] | samuel.trace.reed@gmail.com |
d40fe1fa68ac26ffdef8cf290971dda494b0fc6b | 7cfa2d0e1b28cf14925b5ce55d1b2a9e55bfdb73 | /Week5/Quiz.py | fe8263f84bbcba5785fdfae22c59d5bbf85c23e3 | [] | no_license | ads2100/Saudi-Developer-Organization | 45dcb9a5813fe1725ee3b2ca8defe8331625dc39 | 0dbeb0843ff476805dfc67f774de9f45ffac0754 | refs/heads/master | 2022-02-25T13:54:32.165819 | 2019-09-27T12:42:16 | 2019-09-27T12:42:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | # the lists of numbers
# the two input lists
A = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]
B = [2,4,6,8,10,12,16]
# index of the next element of B to print
start = 0
# walk A[2:17]; alongside each of the first six items also emit one element of B
for x in A[2:17]:
    print('List A element: ' + str(x))
    if start < 6:
        print('List B element: ' + str(B[start]))
        start += 1
| [
"root1@MacBook-Engneering-waseem.local"
] | root1@MacBook-Engneering-waseem.local |
79b6d3007b7c9f2d365f2bb097665e9d494d16d9 | 37a593995c96c65be0e93722cb395cdfac9b0bd2 | /pymongo_fetcher.py | fc214e3f1276dd1f013453a28a50b43a6a5a1251 | [] | no_license | in0928/com-schedule | 26401eb6b21912404cc666db894a52e45de82f2e | 1b0e9989f682ec923687e59e26feb402862157f2 | refs/heads/master | 2022-12-12T10:45:00.508719 | 2020-02-12T08:28:19 | 2020-02-12T08:28:19 | 226,829,792 | 0 | 0 | null | 2022-12-08T03:20:11 | 2019-12-09T09:09:06 | Python | UTF-8 | Python | false | false | 500 | py | from pymongo import MongoClient
class MongoFetcher:
    """Thin wrapper holding handles to the schedule collections in MongoDB."""
    def __init__(self):
        # real DB
        # SECURITY NOTE(review): database credentials are hard-coded in this
        # connection string and committed to source control -- they should be
        # moved to configuration / environment variables and rotated.
        self.client = MongoClient("mongodb+srv://in0928:trybest920928LAISAI@cluster0-nfgyd.gcp.mongodb.net/test?retryWrites=true&w=majority") # host uri
        self.db = self.client.NSDB # Select the database
        # Collection handles for the current and upcoming month's schedules.
        self.schedule_this_month = self.db.scheduleThisMonth
        self.schedule_next_month = self.db.scheduleNextMonth
        self.unions = self.db.unions # 2 this is used with schedule
| [
"49297560+koasurion@users.noreply.github.com"
] | 49297560+koasurion@users.noreply.github.com |
1c033942dc1bbb7c998f9e475650b707ae7e453d | 06e6b257405a077b9cac1cd03373d2ded225634f | /manage.py | 7f59ef13a4055f5d50601ed26fd3e2e3b511cf7e | [] | no_license | darrenwu1058/mystore | ae4bec6ef37976e69ee620974d28f4723788feff | 158487a768ac80e0b632de5e60fcfde18e0e51f0 | refs/heads/master | 2020-05-20T22:44:57.255753 | 2019-03-08T16:16:47 | 2019-05-09T09:09:57 | 185,788,305 | 0 | 0 | null | 2019-05-09T11:45:12 | 2019-05-09T11:45:12 | null | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python
import os
import sys
def main():
    """Configure the settings module and dispatch to Django's CLI utility."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mystore.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the two most common causes.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"github.tsod@tsod.idv.tw"
] | github.tsod@tsod.idv.tw |
a19a5bde912baaf41479780b9b03f7f7f91d23f2 | c1dad911fef54ecf07997a969e72b9f1f316378b | /Longestpalindrome.py | 4fdb8e7af00463c75dba813f0cb728e03d312bf0 | [] | no_license | jatin008/DP-6 | 3b3d2efbc87ea87880912f65a801b99dbf237dd4 | ef2397061613e7b3818506ee7a683c030464d91d | refs/heads/master | 2021-05-22T00:35:55.906604 | 2020-04-04T02:19:00 | 2020-04-04T02:19:00 | 252,887,262 | 0 | 0 | null | 2020-04-04T02:08:17 | 2020-04-04T02:08:16 | null | UTF-8 | Python | false | false | 889 | py | #T: O(M+N)
#S: 0(1)
#Tested on Leetcode
class Solution:
    """Longest palindromic substring via expand-around-center.

    Time O(n^2), space O(1) beyond the bookkeeping attributes.
    """

    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of ``s``."""
        self.start = 0
        self.maxLen = 0
        if len(s) < 2:  # empty or single-char strings are trivially palindromic
            return s
        for center in range(len(s)):
            self.checkpalindrome(s, center, center)      # odd-length candidates
            self.checkpalindrome(s, center, center + 1)  # even-length candidates
        return s[self.start:self.start + self.maxLen]

    def checkpalindrome(self, s, left, right):
        """Expand outward from (left, right) while characters match and record
        the palindrome if it beats the best one seen so far."""
        size = len(s)
        while left >= 0 and right < size and s[left] == s[right]:
            left -= 1
            right += 1
        # The palindrome spans s[left+1:right] after the final failed expansion.
        length = right - left - 1
        if length > self.maxLen:
            self.maxLen = length
            self.start = left + 1
| [
"jatindawar26@gmail.com"
] | jatindawar26@gmail.com |
276a00dcd091106149ec816df2e84655c49386ea | 654caebc0a12face3f5a57fd4c64ffb85c6fe0c6 | /venv/bin/pip | 28f57425b752cd07254df5c6e4572c4495da78cc | [] | no_license | hoAlimoradi/TheFirstCourseInAPA | ede5cdae74eefe175c41a2ade51aff6c701f3810 | e6024ce2f586cecd016e1fc762affec9ffe6c6ad | refs/heads/master | 2020-04-27T11:54:38.236215 | 2019-03-07T09:42:03 | 2019-03-07T09:42:03 | 174,313,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | #!/Users/ho/pycharmProjects/TheFirstCourseInAPA/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Auto-generated setuptools console-script wrapper: resolves and invokes the
# 'pip' console_scripts entry point of the pinned pip==10.0.1 distribution.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0]: strip the platform launcher suffix ('-script.py(w)'
    # or '.exe') so pip reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"ho.alimoradi@gmail.com"
] | ho.alimoradi@gmail.com | |
7c6c857ffe8c602712021be9078f9c31a1a5d128 | 97dc530ab8852f328fa1974710caabd3ffec32ac | /apps/cron_consume_apk/bak/cron_crawl_appinfo_v2.py | d8335f9e367f950efbaf9a7cbdc7b64d2b46cfa7 | [] | no_license | danielzhu1201/getAppInfo | 12df4c9f99a06556313c0e90ab672dbcda246725 | 7db1c991fa9e56c1760a50f275d35d63bd15988a | refs/heads/master | 2020-03-20T00:31:59.403042 | 2018-06-14T06:09:40 | 2018-06-14T06:09:40 | 137,047,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,277 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# qw @ 2017-03-08 17:48:52
import sys
import os
import json
from config import *
import redis
import multiprocessing
import time
import random
import traceback
import urllib
import logging
import datetime
# Log everything (DEBUG and up) to a per-run file named after the script and
# the startup timestamp, appending if it already exists.
logging.basicConfig(
    level=logging.DEBUG,
    # format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
    datefmt='%a, %d %b %Y %H:%M:%S',
    filename='./%s.%s.log'%(sys.argv[0],datetime.datetime.now().strftime("%Y%m%d%H%M%S")),
    filemode='a')
wait_time = 5  # seconds to wait between polls once the database has no data
def startthread(url):
    """Fire a single HTTP GET against ``url``, best-effort.

    The response body is read and discarded (the remote service performs the
    actual work); any failure is logged with a traceback instead of
    propagating to the caller.
    """
    logging.debug(url)
    try:
        urllib.urlopen(url).read()  # Python 2 urllib API; body intentionally discarded
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and made the worker processes hard to stop.
        traceback.print_exc()
def select_platform_and_get_apk():
    """Worker loop: pop package names from the redis crawl queue, skip those
    already known or already crawled, and trigger an appinfo fetch for the
    rest.  Runs forever (intended for pool workers); sleeps when idle.

    Depends on module-level config: ``redis_db``, ``crawl_db``, ``wait_time``.
    """
    # One connection per logical database; "crawled" tracks processed packages.
    redis_dict = {
        "android": redis.Redis(db = redis_db["android"]),
        "ios": redis.Redis(db = redis_db["ios"]),
        "crawled": redis.Redis(db = redis_db["crawled"]),
    }
    while 1:
        platforms = redis_db.keys()
        # Pick a random platform that has a crawl queue configured.
        platform = random.choice([xplatform for xplatform in platforms if xplatform in crawl_db])
        # NOTE(review): a new connection is created on every iteration -- this
        # could be hoisted out of the loop; verify connection reuse is safe.
        redis_get_apk = redis.Redis(db=redis_db["crawl_apk"])
        lineT = redis_get_apk.spop(platform)
        if lineT == None:
            # Queue empty: back off before polling again.
            time.sleep(wait_time)
            continue
        # Queue entries are "<package> ..." -- keep only the package name.
        apk = lineT.split(" ")[0].strip()
        redis_platform = redis_dict[platform]
        redis_crawled = redis_dict["crawled"]
        exists_status = redis_platform.exists(apk.lower())
        exists_crawled = redis_crawled.sismember(platform,apk)
        logging.info("\t".join([str(m) for m in [apk,platform,redis_db[platform],exists_status,exists_crawled]]))
        if exists_status or exists_crawled:
            continue
        else:
            url = "http://dm.umlife.com/appinfo/api/v1/getInfo?os=%s&pkg=%s"%(platform,apk)
            startthread(url)
            # Mark as crawled so other workers skip it.
            redis_crawled.sadd(platform,apk)
            # Small random delay (0-0.2s) to throttle outbound requests.
            time.sleep(random.randint(0,10)*1.0/50)
#select_platform_and_get_apk()
# Fan the (infinite) worker loop out across a pool of processes.
pool = multiprocessing.Pool(crawl_thread_num)
for x_thread_num in range(crawl_thread_num):
    pool.apply_async(select_platform_and_get_apk,())
#pool.apply_async(select_platform_and_get_apk,())
pool.close()
# NOTE(review): the workers never return, so join() effectively blocks
# forever and the "all done" line is only reached if workers are killed.
pool.join()
logging.info("all done")
#os._exit()
| [
"zhuzhaosong@youmi.net"
] | zhuzhaosong@youmi.net |
cfbdabfc13d6e89a1cfcbc015ee849ffc5635eb5 | 148072ce210ca4754ea4a37d83057e2cf2fdc5a1 | /src/core/w3af/w3af/plugins/attack/db/sqlmap/plugins/generic/filesystem.py | ee9770612e90346479646bec319c7d2028574f13 | [] | no_license | ycc1746582381/webfuzzer | 8d42fceb55c8682d6c18416b8e7b23f5e430c45f | 0d9aa35c3218dc58f81c429cae0196e4c8b7d51b | refs/heads/master | 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,062 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
from lib.core.agent import agent
from lib.core.common import dataToOutFile
from lib.core.common import Backend
from lib.core.common import checkFile
from lib.core.common import decloakToTemp
from lib.core.common import decodeHexValue
from lib.core.common import isNumPosStrValue
from lib.core.common import isListLike
from lib.core.common import isStackingAvailable
from lib.core.common import isTechniqueAvailable
from lib.core.common import readInput
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapUndefinedMethod
from lib.request import inject
class Filesystem:
    """
    This class defines generic OS file system functionalities for plugins.
    """
    def __init__(self):
        # Support table (and its single column) used to stage file content
        # inside the target DBMS during read/write operations.
        self.fileTblName = "sqlmapfile"
        self.tblField = "data"
    def _checkFileLength(self, localFile, remoteFile, fileRead=False):
        # Compare the local file size with the remote file's size obtained via
        # a DBMS-specific length query.  Returns True (same size), False
        # (different / not written) or None (no usable answer).
        if Backend.isDbms(DBMS.MYSQL):
            lengthQuery = "LENGTH(LOAD_FILE('%s'))" % remoteFile
        elif Backend.isDbms(DBMS.PGSQL) and not fileRead:
            # NOTE(review): self.oid is expected to be set by the PostgreSQL
            # plugin's large-object upload before this runs -- verify.
            lengthQuery = "SELECT LENGTH(data) FROM pg_largeobject WHERE loid=%d" % self.oid
        elif Backend.isDbms(DBMS.MSSQL):
            self.createSupportTbl(self.fileTblName, self.tblField, "VARBINARY(MAX)")
            inject.goStacked("INSERT INTO %s(%s) SELECT %s FROM OPENROWSET(BULK '%s', SINGLE_BLOB) AS %s(%s)" % (
                self.fileTblName, self.tblField, self.tblField, remoteFile, self.fileTblName, self.tblField));
            lengthQuery = "SELECT DATALENGTH(%s) FROM %s" % (self.tblField, self.fileTblName)
        localFileSize = os.path.getsize(localFile)
        if fileRead and Backend.isDbms(DBMS.PGSQL):
            logger.info("length of read file %s cannot be checked on PostgreSQL" % remoteFile)
            sameFile = True
        else:
            logger.debug("checking the length of the remote file %s" % remoteFile)
            remoteFileSize = inject.getValue(lengthQuery, resumeValue=False, expected=EXPECTED.INT,
                                             charsetType=CHARSET_TYPE.DIGITS)
            sameFile = None
            if isNumPosStrValue(remoteFileSize):
                remoteFileSize = long(remoteFileSize)
                sameFile = False
                if localFileSize == remoteFileSize:
                    sameFile = True
                    infoMsg = "the local file %s and the remote file " % localFile
                    infoMsg += "%s have the same size (%db)" % (remoteFile, localFileSize)
                elif remoteFileSize > localFileSize:
                    infoMsg = "the remote file %s is larger (%db) than " % (remoteFile, remoteFileSize)
                    infoMsg += "the local file %s (%db)" % (localFile, localFileSize)
                else:
                    infoMsg = "the remote file %s is smaller (%db) than " % (remoteFile, remoteFileSize)
                    infoMsg += "file %s (%db)" % (localFile, localFileSize)
                logger.info(infoMsg)
            else:
                sameFile = False
                warnMsg = "it looks like the file has not been written (usually "
                warnMsg += "occurs if the DBMS process' user has no write "
                warnMsg += "privileges in the destination path)"
                logger.warn(warnMsg)
        return sameFile
    def fileToSqlQueries(self, fcEncodedList):
        """
        Called by MySQL and PostgreSQL plugins to write a file on the
        back-end DBMS underlying file system
        """
        counter = 0
        sqlQueries = []
        # First chunk INSERTs the row; each following chunk is concatenated
        # onto the stored value with an UPDATE.
        for fcEncodedLine in fcEncodedList:
            if counter == 0:
                sqlQueries.append("INSERT INTO %s(%s) VALUES (%s)" % (self.fileTblName, self.tblField, fcEncodedLine))
            else:
                updatedField = agent.simpleConcatenate(self.tblField, fcEncodedLine)
                sqlQueries.append("UPDATE %s SET %s=%s" % (self.fileTblName, self.tblField, updatedField))
            counter += 1
        return sqlQueries
    def fileEncode(self, fileName, encoding, single):
        """
        Called by MySQL and PostgreSQL plugins to write a file on the
        back-end DBMS underlying file system
        """
        retVal = []
        with open(fileName, "rb") as f:
            content = f.read().encode(encoding).replace("\n", "")
        if not single:
            # Split the encoded payload into 256-character chunks so each
            # chunk fits into a single SQL statement.
            if len(content) > 256:
                for i in xrange(0, len(content), 256):
                    _ = content[i:i + 256]
                    if encoding == "hex":
                        _ = "0x%s" % _
                    elif encoding == "base64":
                        _ = "'%s'" % _
                    retVal.append(_)
        if not retVal:
            # Single-chunk mode (or content short enough to fit in one chunk).
            if encoding == "hex":
                content = "0x%s" % content
            elif encoding == "base64":
                content = "'%s'" % content
            retVal = [content]
        return retVal
    def askCheckWrittenFile(self, localFile, remoteFile, forceCheck=False):
        # Optionally confirm (interactively, unless forceCheck) that the
        # upload produced a remote file of the expected size.
        output = None
        if forceCheck is not True:
            message = "do you want confirmation that the local file '%s' " % localFile
            message += "has been successfully written on the back-end DBMS "
            message += "file system (%s)? [Y/n] " % remoteFile
            output = readInput(message, default="Y")
        if forceCheck or (output and output.lower() == "y"):
            return self._checkFileLength(localFile, remoteFile)
        return True
    def askCheckReadFile(self, localFile, remoteFile):
        # Ask the user whether to verify the size of the downloaded file.
        message = "do you want confirmation that the remote file '%s' " % remoteFile
        message += "has been successfully downloaded from the back-end "
        message += "DBMS file system? [Y/n] "
        output = readInput(message, default="Y")
        if not output or output in ("y", "Y"):
            return self._checkFileLength(localFile, remoteFile, True)
        return None
    def nonStackedReadFile(self, remoteFile):
        # Abstract: implemented by the DBMS-specific plugin.
        errMsg = "'nonStackedReadFile' method must be defined "
        errMsg += "into the specific DBMS plugin"
        raise SqlmapUndefinedMethod(errMsg)
    def stackedReadFile(self, remoteFile):
        # Abstract: implemented by the DBMS-specific plugin.
        errMsg = "'stackedReadFile' method must be defined "
        errMsg += "into the specific DBMS plugin"
        raise SqlmapUndefinedMethod(errMsg)
    def unionWriteFile(self, localFile, remoteFile, fileType, forceCheck=False):
        # Abstract: implemented by the DBMS-specific plugin.
        errMsg = "'unionWriteFile' method must be defined "
        errMsg += "into the specific DBMS plugin"
        raise SqlmapUndefinedMethod(errMsg)
    def stackedWriteFile(self, localFile, remoteFile, fileType, forceCheck=False):
        # Abstract: implemented by the DBMS-specific plugin.
        errMsg = "'stackedWriteFile' method must be defined "
        errMsg += "into the specific DBMS plugin"
        raise SqlmapUndefinedMethod(errMsg)
    def readFile(self, remoteFiles):
        # Read one or more comma-separated remote files, save each to a local
        # output file, and return the list of local paths.
        localFilePaths = []
        self.checkDbmsOs()
        for remoteFile in remoteFiles.split(","):
            fileContent = None
            kb.fileReadMode = True
            # Prefer stacked queries; fall back to non-stacked on MySQL.
            if conf.direct or isStackingAvailable():
                if isStackingAvailable():
                    debugMsg = "going to read the file with stacked query SQL "
                    debugMsg += "injection technique"
                    logger.debug(debugMsg)
                fileContent = self.stackedReadFile(remoteFile)
            elif Backend.isDbms(DBMS.MYSQL):
                debugMsg = "going to read the file with a non-stacked query "
                debugMsg += "SQL injection technique"
                logger.debug(debugMsg)
                fileContent = self.nonStackedReadFile(remoteFile)
            else:
                errMsg = "none of the SQL injection techniques detected can "
                errMsg += "be used to read files from the underlying file "
                errMsg += "system of the back-end %s server" % Backend.getDbms()
                logger.error(errMsg)
                fileContent = None
            kb.fileReadMode = False
            if fileContent in (None, "") and not Backend.isDbms(DBMS.PGSQL):
                self.cleanup(onlyFileTbl=True)
            elif isListLike(fileContent):
                # Flatten one level of list nesting into a single string.
                newFileContent = ""
                for chunk in fileContent:
                    if isListLike(chunk):
                        if len(chunk) > 0:
                            chunk = chunk[0]
                        else:
                            chunk = ""
                    if chunk:
                        newFileContent += chunk
                fileContent = newFileContent
            if fileContent is not None:
                fileContent = decodeHexValue(fileContent, True)
                if fileContent:
                    localFilePath = dataToOutFile(remoteFile, fileContent)
                    if not Backend.isDbms(DBMS.PGSQL):
                        self.cleanup(onlyFileTbl=True)
                    # Annotate the saved path with the size-check verdict.
                    sameFile = self.askCheckReadFile(localFilePath, remoteFile)
                    if sameFile is True:
                        localFilePath += " (same file)"
                    elif sameFile is False:
                        localFilePath += " (size differs from remote file)"
                    localFilePaths.append(localFilePath)
                else:
                    errMsg = "no data retrieved"
                    logger.error(errMsg)
        return localFilePaths
    def writeFile(self, localFile, remoteFile, fileType=None, forceCheck=False):
        # Upload a local file onto the remote file system, preferring stacked
        # queries and falling back to the UNION technique on MySQL.
        written = False
        checkFile(localFile)
        self.checkDbmsOs()
        if localFile.endswith('_'):
            # Cloaked (obfuscated) payload files are decloaked to a temporary
            # file before the upload.
            localFile = decloakToTemp(localFile)
        if conf.direct or isStackingAvailable():
            if isStackingAvailable():
                debugMsg = "going to upload the %s file with " % fileType
                debugMsg += "stacked query SQL injection technique"
                logger.debug(debugMsg)
            written = self.stackedWriteFile(localFile, remoteFile, fileType, forceCheck)
            self.cleanup(onlyFileTbl=True)
        elif isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION) and Backend.isDbms(DBMS.MYSQL):
            debugMsg = "going to upload the %s file with " % fileType
            debugMsg += "UNION query SQL injection technique"
            logger.debug(debugMsg)
            written = self.unionWriteFile(localFile, remoteFile, fileType, forceCheck)
        else:
            errMsg = "none of the SQL injection techniques detected can "
            errMsg += "be used to write files to the underlying file "
            errMsg += "system of the back-end %s server" % Backend.getDbms()
            logger.error(errMsg)
            return None
        return written
| [
"everping@outlook.com"
] | everping@outlook.com |
bccd589288338812701721351e97a1efb0ac9b50 | f1dc6331fa73b7f3f5bc6b3af64ceab4a4804849 | /src/sagemaker/local/image.py | 432567484b5eae3f674b9a35416a577ac48747cc | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lily1110/sagemaker-python-sdk | 77532f9ba9ed105dd56f0b3d1824d2dd9cd7065e | fb91511e32610965bcb793e976eb7a47d161c04f | refs/heads/master | 2020-03-20T12:42:36.228606 | 2018-06-14T22:26:56 | 2018-06-14T22:26:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,342 | py | # Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import base64
import errno
import json
import logging
import os
import platform
import random
import shlex
import shutil
import string
import subprocess
import sys
import tempfile
from fcntl import fcntl, F_GETFL, F_SETFL
from six.moves.urllib.parse import urlparse
from threading import Thread
import yaml
import sagemaker
from sagemaker.utils import get_config_value
CONTAINER_PREFIX = "algo"  # container host names are '<prefix>-<index>-<random suffix>'
DOCKER_COMPOSE_FILENAME = 'docker-compose.yaml'
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
class _SageMakerContainer(object):
"""Handle the lifecycle and configuration of a local docker container execution.
This class is responsible for creating the directories and configuration files that
the docker containers will use for either training or serving.
"""
def __init__(self, instance_type, instance_count, image, sagemaker_session=None):
"""Initialize a SageMakerContainer instance
It uses a :class:`sagemaker.session.Session` for general interaction with user configuration
such as getting the default sagemaker S3 bucket. However this class does not call any of the
SageMaker APIs.
Args:
instance_type (str): The instance type to use. Either 'local' or 'local_gpu'
instance_count (int): The number of instances to create.
image (str): docker image to use.
sagemaker_session (sagemaker.session.Session): a sagemaker session to use when interacting
with SageMaker.
"""
from sagemaker.local.local_session import LocalSession
self.sagemaker_session = sagemaker_session or LocalSession()
self.instance_type = instance_type
self.instance_count = instance_count
self.image = image
# Since we are using a single docker network, Generate a random suffix to attach to the container names.
# This way multiple jobs can run in parallel.
suffix = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
self.hosts = ['{}-{}-{}'.format(CONTAINER_PREFIX, i, suffix) for i in range(1, self.instance_count + 1)]
self.container_root = None
self.container = None
    def train(self, input_data_config, hyperparameters):
        """Run a training job locally using docker-compose.
        Args:
            input_data_config (dict): The Input Data Configuration, this contains data such as the
                channels to be used for training.
            hyperparameters (dict): The HyperParameters for the training job.
        Returns (str): Location of the trained model.
        """
        self.container_root = self._create_tmp_folder()
        os.mkdir(os.path.join(self.container_root, 'output'))
        # A shared directory for all the containers. It is only mounted if the training script is
        # Local.
        shared_dir = os.path.join(self.container_root, 'shared')
        os.mkdir(shared_dir)
        # Channel data is staged once, then copied into every host's input dir.
        data_dir = self._create_tmp_folder()
        volumes = self._prepare_training_volumes(data_dir, input_data_config, hyperparameters)
        # Create the configuration files for each container that we will create
        # Each container will map the additional local volumes (if any).
        for host in self.hosts:
            _create_config_file_directories(self.container_root, host)
            self.write_config_files(host, hyperparameters, input_data_config)
            shutil.copytree(data_dir, os.path.join(self.container_root, host, 'input', 'data'))
        compose_data = self._generate_compose_file('train', additional_volumes=volumes)
        compose_command = self._compose()
        _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image)
        process = subprocess.Popen(compose_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            _stream_output(process)
        except RuntimeError as e:
            # _stream_output() doesn't have the command line. We will handle the exception
            # which contains the exit code and append the command line to it.
            # NOTE(review): `e.message` is Python 2 only; on Python 3 this line
            # would raise AttributeError -- verify intended Python versions.
            msg = "Failed to run: %s, %s" % (compose_command, e.message)
            raise RuntimeError(msg)
        s3_artifacts = self.retrieve_artifacts(compose_data)
        # free up the training data directory as it may contain
        # lots of data downloaded from S3. This doesn't delete any local
        # data that was just mounted to the container.
        _delete_tree(data_dir)
        _delete_tree(shared_dir)
        # Also free the container config files.
        for host in self.hosts:
            container_config_path = os.path.join(self.container_root, host)
            _delete_tree(container_config_path)
        self._cleanup()
        # Print our Job Complete line to have a simmilar experience to training on SageMaker where you
        # see this line at the end.
        print('===== Job Complete =====')
        return s3_artifacts
    def serve(self, primary_container):
        """Host a local endpoint using docker-compose.
        Args:
            primary_container (dict): dictionary containing the container runtime settings
                for serving. Expected keys:
                - 'ModelDataUrl' pointing to a local file
                - 'Environment' a dictionary of environment variables to be passed to the hosting container.
        """
        logger.info("serving")
        self.container_root = self._create_tmp_folder()
        logger.info('creating hosting dir in {}'.format(self.container_root))
        model_dir = primary_container['ModelDataUrl']
        if not model_dir.lower().startswith("s3://"):
            # Local model artifacts: copy them into each host's model directory.
            for h in self.hosts:
                host_dir = os.path.join(self.container_root, h)
                os.makedirs(host_dir)
                shutil.copytree(model_dir, os.path.join(self.container_root, h, 'model'))
        # Flatten the environment dict into docker-compose 'KEY=value' entries.
        env_vars = ['{}={}'.format(k, v) for k, v in primary_container['Environment'].items()]
        _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image)
        # If the user script was passed as a file:// mount it to the container.
        script_dir = primary_container['Environment'][sagemaker.estimator.DIR_PARAM_NAME.upper()]
        parsed_uri = urlparse(script_dir)
        volumes = []
        if parsed_uri.scheme == 'file':
            volumes.append(_Volume(parsed_uri.path, '/opt/ml/code'))
        self._generate_compose_file('serve',
                                    additional_env_vars=env_vars,
                                    additional_volumes=volumes)
        compose_command = self._compose()
        # Hosting runs on a background thread so the SDK can keep doing other work.
        self.container = _HostingContainer(compose_command)
        self.container.start()
def stop_serving(self):
"""Stop the serving container.
The serving container runs in async mode to allow the SDK to do other tasks.
"""
if self.container:
self.container.down()
self.container.join()
self._cleanup()
# for serving we can delete everything in the container root.
_delete_tree(self.container_root)
    def retrieve_artifacts(self, compose_data):
        """Get the model artifacts from all the container nodes.
        Used after training completes to gather the data from all the individual containers. As the
        official SageMaker Training Service, it will override duplicate files if multiple containers have
        the same file names.
        Args:
            compose_data(dict): Docker-Compose configuration in dictionary format.
        Returns: Local path to the collected model artifacts.
        """
        # Grab the model artifacts from all the Nodes.
        s3_artifacts = os.path.join(self.container_root, 's3_artifacts')
        os.mkdir(s3_artifacts)
        s3_model_artifacts = os.path.join(s3_artifacts, 'model')
        s3_output_artifacts = os.path.join(s3_artifacts, 'output')
        os.mkdir(s3_model_artifacts)
        os.mkdir(s3_output_artifacts)
        for host in self.hosts:
            volumes = compose_data['services'][str(host)]['volumes']
            # Each volume entry is 'host_dir:container_dir'; pull back the
            # directories the container mounted for model and output data.
            for volume in volumes:
                host_dir, container_dir = volume.split(':')
                if container_dir == '/opt/ml/model':
                    self._recursive_copy(host_dir, s3_model_artifacts)
                elif container_dir == '/opt/ml/output':
                    self._recursive_copy(host_dir, s3_output_artifacts)
        return s3_model_artifacts
def write_config_files(self, host, hyperparameters, input_data_config):
"""Write the config files for the training containers.
This method writes the hyperparameters, resources and input data configuration files.
Args:
host (str): Host to write the configuration for
hyperparameters (dict): Hyperparameters for training.
input_data_config (dict): Training input channels to be used for training.
Returns: None
"""
config_path = os.path.join(self.container_root, host, 'input', 'config')
resource_config = {
'current_host': host,
'hosts': self.hosts
}
json_input_data_config = {
c['ChannelName']: {'ContentType': 'application/octet-stream'} for c in input_data_config
}
_write_json_file(os.path.join(config_path, 'hyperparameters.json'), hyperparameters)
_write_json_file(os.path.join(config_path, 'resourceconfig.json'), resource_config)
_write_json_file(os.path.join(config_path, 'inputdataconfig.json'), json_input_data_config)
def _recursive_copy(self, src, dst):
for root, dirs, files in os.walk(src):
root = os.path.relpath(root, src)
current_path = os.path.join(src, root)
target_path = os.path.join(dst, root)
for file in files:
shutil.copy(os.path.join(current_path, file), os.path.join(target_path, file))
for dir in dirs:
new_dir = os.path.join(target_path, dir)
if not os.path.exists(new_dir):
os.mkdir(os.path.join(target_path, dir))
    def _download_folder(self, bucket_name, prefix, target):
        """Download every S3 object under ``prefix`` into ``target``.

        Args:
            bucket_name (str): S3 bucket to read from.
            prefix (str): key prefix acting as the "folder" to download.
            target (str): local directory that will mirror the prefix layout.
        """
        boto_session = self.sagemaker_session.boto_session

        s3 = boto_session.resource('s3')
        bucket = s3.Bucket(bucket_name)

        for obj_sum in bucket.objects.filter(Prefix=prefix):
            obj = s3.Object(obj_sum.bucket_name, obj_sum.key)
            # Path of the object relative to the prefix, recreated under target.
            s3_relative_path = obj_sum.key[len(prefix):].lstrip('/')
            file_path = os.path.join(target, s3_relative_path)

            try:
                os.makedirs(os.path.dirname(file_path))
            except OSError as exc:
                # The directory may already exist from an earlier object in
                # the same prefix; only EEXIST is expected and ignorable.
                if exc.errno != errno.EEXIST:
                    raise
                pass
            # NOTE(review): keys that end in '/' (S3 "folder" placeholders)
            # would make download_file fail — presumably such keys do not
            # occur for training data uploaded by the SDK; confirm.
            obj.download_file(file_path)
    def _prepare_training_volumes(self, data_dir, input_data_config, hyperparameters):
        """Build the list of volumes the training containers must mount.

        S3 channels are downloaded into ``data_dir`` first; file channels are
        mounted directly. A local code directory (if present in the
        hyperparameters) and a shared scratch directory are also mounted.

        Args:
            data_dir (str): local directory where S3 channel data is staged.
            input_data_config (list): training input channel definitions.
            hyperparameters (dict): training hyperparameters; only the code
                directory entry (if any) is consulted here.

        Returns:
            list: _Volume instances to attach to each training container.

        Raises:
            ValueError: for a channel without a recognised data source or
                with an unsupported URI scheme.
        """
        shared_dir = os.path.join(self.container_root, 'shared')
        volumes = []
        # Set up the channels for the containers. For local data we will
        # mount the local directory to the container. For S3 Data we will download the S3 data
        # first.
        for channel in input_data_config:
            if channel['DataSource'] and 'S3DataSource' in channel['DataSource']:
                uri = channel['DataSource']['S3DataSource']['S3Uri']
            elif channel['DataSource'] and 'FileDataSource' in channel['DataSource']:
                uri = channel['DataSource']['FileDataSource']['FileUri']
            else:
                raise ValueError('Need channel[\'DataSource\'] to have'
                                 ' [\'S3DataSource\'] or [\'FileDataSource\']')

            parsed_uri = urlparse(uri)
            key = parsed_uri.path.lstrip('/')

            channel_name = channel['ChannelName']
            channel_dir = os.path.join(data_dir, channel_name)
            os.mkdir(channel_dir)

            if parsed_uri.scheme == 's3':
                bucket_name = parsed_uri.netloc
                self._download_folder(bucket_name, key, channel_dir)
            elif parsed_uri.scheme == 'file':
                path = parsed_uri.path
                volumes.append(_Volume(path, channel=channel_name))
            else:
                raise ValueError('Unknown URI scheme {}'.format(parsed_uri.scheme))

        # If there is a training script directory and it is a local directory,
        # mount it to the container.
        if sagemaker.estimator.DIR_PARAM_NAME in hyperparameters:
            training_dir = json.loads(hyperparameters[sagemaker.estimator.DIR_PARAM_NAME])
            parsed_uri = urlparse(training_dir)
            if parsed_uri.scheme == 'file':
                volumes.append(_Volume(parsed_uri.path, '/opt/ml/code'))

        # Also mount a directory that all the containers can access.
        volumes.append(_Volume(shared_dir, '/opt/ml/shared'))

        return volumes
def _generate_compose_file(self, command, additional_volumes=None, additional_env_vars=None):
"""Writes a config file describing a training/hosting environment.
This method generates a docker compose configuration file, it has an entry for each container
that will be created (based on self.hosts). it calls
:meth:~sagemaker.local_session.SageMakerContainer._create_docker_host to generate the config
for each individual container.
Args:
command (str): either 'train' or 'serve'
additional_volumes (list): a list of volumes that will be mapped to the containers
additional_env_vars (dict): a dictionary with additional environment variables to be
passed on to the containers.
Returns: (dict) A dictionary representation of the configuration that was written.
"""
boto_session = self.sagemaker_session.boto_session
additional_env_vars = additional_env_vars or []
additional_volumes = additional_volumes or {}
environment = []
optml_dirs = set()
aws_creds = _aws_credentials(boto_session)
if aws_creds is not None:
environment.extend(aws_creds)
environment.extend(additional_env_vars)
if command == 'train':
optml_dirs = {'output', 'input'}
services = {
h: self._create_docker_host(h, environment, optml_dirs,
command, additional_volumes) for h in self.hosts
}
content = {
# Some legacy hosts only support the 2.1 format.
'version': '2.1',
'services': services,
'networks': {
'sagemaker-local': {'name': 'sagemaker-local'}
}
}
docker_compose_path = os.path.join(self.container_root, DOCKER_COMPOSE_FILENAME)
yaml_content = yaml.dump(content, default_flow_style=False)
logger.info('docker compose file: \n{}'.format(yaml_content))
with open(docker_compose_path, 'w') as f:
f.write(yaml_content)
return content
def _compose(self, detached=False):
compose_cmd = 'docker-compose'
command = [
compose_cmd,
'-f',
os.path.join(self.container_root, DOCKER_COMPOSE_FILENAME),
'up',
'--build',
'--abort-on-container-exit'
]
if detached:
command.append('-d')
logger.info('docker command: {}'.format(' '.join(command)))
return command
def _create_docker_host(self, host, environment, optml_subdirs, command, volumes):
optml_volumes = self._build_optml_volumes(host, optml_subdirs)
optml_volumes.extend(volumes)
host_config = {
'image': self.image,
'stdin_open': True,
'tty': True,
'volumes': [v.map for v in optml_volumes],
'environment': environment,
'command': command,
'networks': {
'sagemaker-local': {
'aliases': [host]
}
}
}
if command == 'serve':
serving_port = get_config_value('local.serving_port',
self.sagemaker_session.config) or 8080
host_config.update({
'ports': [
'%s:8080' % serving_port
]
})
return host_config
def _create_tmp_folder(self):
root_dir = get_config_value('local.container_root', self.sagemaker_session.config)
if root_dir:
root_dir = os.path.abspath(root_dir)
dir = tempfile.mkdtemp(dir=root_dir)
# Docker cannot mount Mac OS /var folder properly see
# https://forums.docker.com/t/var-folders-isnt-mounted-properly/9600
# Only apply this workaround if the user didn't provide an alternate storage root dir.
if root_dir is None and platform.system() == 'Darwin':
dir = '/private{}'.format(dir)
return os.path.abspath(dir)
def _build_optml_volumes(self, host, subdirs):
"""Generate a list of :class:`~sagemaker.local_session.Volume` required for the container to start.
It takes a folder with the necessary files for training and creates a list of opt volumes that
the Container needs to start.
Args:
host (str): container for which the volumes will be generated.
subdirs (list): list of subdirectories that will be mapped. For example: ['input', 'output', 'model']
Returns: (list) List of :class:`~sagemaker.local_session.Volume`
"""
volumes = []
# Ensure that model is in the subdirs
if 'model' not in subdirs:
subdirs.add('model')
for subdir in subdirs:
host_dir = os.path.join(self.container_root, host, subdir)
container_dir = '/opt/ml/{}'.format(subdir)
volume = _Volume(host_dir, container_dir)
volumes.append(volume)
return volumes
    def _cleanup(self):
        """Hook for post-run cleanup; nothing needs to be removed right now."""
        # we don't need to cleanup anything at the moment
        pass
class _HostingContainer(Thread):
def __init__(self, command):
Thread.__init__(self)
self.command = command
self.process = None
def run(self):
self.process = subprocess.Popen(self.command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
_stream_output(self.process)
except RuntimeError as e:
# _stream_output() doesn't have the command line. We will handle the exception
# which contains the exit code and append the command line to it.
msg = "Failed to run: %s, %s" % (self.command, e.message)
raise RuntimeError(msg)
def down(self):
self.process.terminate()
class _Volume(object):
"""Represent a Volume that will be mapped to a container.
"""
def __init__(self, host_dir, container_dir=None, channel=None):
"""Create a Volume instance
the container path can be provided as a container_dir or as a channel name but not both.
Args:
host_dir (str): path to the volume data in the host
container_dir (str): path inside the container that host_dir will be mapped to
channel (str): channel name that the host_dir represents. It will be mapped as
/opt/ml/input/data/<channel> in the container.
"""
if not container_dir and not channel:
raise ValueError('Either container_dir or channel must be declared.')
if container_dir and channel:
raise ValueError('container_dir and channel cannot be declared together.')
self.container_dir = container_dir if container_dir else os.path.join('/opt/ml/input/data', channel)
self.host_dir = host_dir
if platform.system() == 'Darwin' and host_dir.startswith('/var'):
self.host_dir = os.path.join('/private', host_dir)
self.map = '{}:{}'.format(self.host_dir, self.container_dir)
def _stream_output(process):
    """Stream the output of a process to stdout

    This function takes an existing process that will be polled for output. Both stdout and
    stderr will be polled and both will be sent to sys.stdout.

    Args:
        process(subprocess.Popen): a process that has been started with
            stdout=PIPE and stderr=PIPE

    Returns (int): process exit code

    Raises:
        RuntimeError: if the process exits with a non-zero code.
    """
    exit_code = None

    # Get the current flags for the stderr file descriptor
    # And add the NONBLOCK flag to allow us to read even if there is no data.
    # Since usually stderr will be empty unless there is an error.
    # NOTE(review): fcntl / F_GETFL / F_SETFL come from a module-level
    # 'from fcntl import ...' outside this chunk — confirm; POSIX-only.
    flags = fcntl(process.stderr, F_GETFL)  # get current process.stderr flags
    fcntl(process.stderr, F_SETFL, flags | os.O_NONBLOCK)

    while exit_code is None:
        stdout = process.stdout.readline().decode("utf-8")
        sys.stdout.write(stdout)
        try:
            stderr = process.stderr.readline().decode("utf-8")
            sys.stdout.write(stderr)
        except IOError:
            # If there is nothing to read on stderr we will get an IOError
            # this is fine.
            pass
        # poll() returns None while the process is still running.
        exit_code = process.poll()

    if exit_code != 0:
        raise RuntimeError("Process exited with code: %s" % exit_code)

    return exit_code
def _check_output(cmd, *popenargs, **kwargs):
if isinstance(cmd, str):
cmd = shlex.split(cmd)
success = True
try:
output = subprocess.check_output(cmd, *popenargs, **kwargs)
except subprocess.CalledProcessError as e:
output = e.output
success = False
output = output.decode("utf-8")
if not success:
logger.error("Command output: %s" % output)
raise Exception("Failed to run %s" % ",".join(cmd))
return output
def _create_config_file_directories(root, host):
for d in ['input', 'input/config', 'output', 'model']:
os.makedirs(os.path.join(root, host, d))
def _delete_tree(path):
try:
shutil.rmtree(path)
except OSError as exc:
# on Linux, when docker writes to any mounted volume, it uses the container's user. In most cases
# this is root. When the container exits and we try to delete them we can't because root owns those
# files. We expect this to happen, so we handle EACCESS. Any other error we will raise the
# exception up.
if exc.errno == errno.EACCES:
logger.warning("Failed to delete: %s Please remove it manually." % path)
else:
logger.error("Failed to delete: %s" % path)
raise
def _aws_credentials(session):
try:
creds = session.get_credentials()
access_key = creds.access_key
secret_key = creds.secret_key
# if there is a Token as part of the credentials, it is not safe to
# pass them as environment variables because the Token is not static, this is the case
# when running under an IAM Role in EC2 for example. By not passing credentials the
# SDK in the container will look for the credentials in the EC2 Metadata Service.
if creds.token is None:
return [
'AWS_ACCESS_KEY_ID=%s' % (str(access_key)),
'AWS_SECRET_ACCESS_KEY=%s' % (str(secret_key))
]
else:
return None
except Exception as e:
logger.info('Could not get AWS creds: %s' % e)
return None
def _write_json_file(filename, content):
with open(filename, 'w') as f:
json.dump(content, f)
def _ecr_login_if_needed(boto_session, image):
    """Log the local docker daemon into ECR when ``image`` requires it.

    No-op for non-ECR images and for ECR images already cached locally.

    Args:
        boto_session: boto3 session used to obtain the ECR authorization token.
        image (str): full image URI, e.g. ``<account>.dkr.ecr.<region>.amazonaws.com/...``.

    Raises:
        RuntimeError: if an ECR image must be pulled but no boto session is available.
    """
    # Only ECR images need login
    if not ('dkr.ecr' in image and 'amazonaws.com' in image):
        return

    # do we have the image?
    if _check_output('docker images -q %s' % image).strip():
        return

    if not boto_session:
        raise RuntimeError('A boto session is required to login to ECR.'
                           'Please pull the image: %s manually.' % image)

    ecr = boto_session.client('ecr')
    auth = ecr.get_authorization_token(registryIds=[image.split('.')[0]])
    authorization_data = auth['authorizationData'][0]

    raw_token = base64.b64decode(authorization_data['authorizationToken'])
    # The decoded token has the form 'AWS:<password>'. The previous code used
    # str.strip('AWS:'), which strips *characters* from both ends and could
    # mangle passwords that start or end with 'A', 'W', 'S' or ':'.
    token = raw_token.decode('utf-8').split(':', 1)[1]

    ecr_url = authorization_data['proxyEndpoint']

    cmd = "docker login -u AWS -p %s %s" % (token, ecr_url)
    subprocess.check_output(cmd, shell=True)
| [
"noreply@github.com"
] | noreply@github.com |
ae7c6fb32f635a0734cb1b392275d8a99e4c28fa | ece78bd2141b3ce31bb0e52b9298734b899eb05e | /Number_theory_and_Other_Mathematical/Prime_number_and_Prime_Factorization/gcd_euclid.py | b295194fc1be35d6306b0100dc322c4949fea6f6 | [] | no_license | code-project-done/Algorithms_for_Competitive_Programming_in_Python | cf4cacbe7e3f170a454dce59949973e6152737b2 | fe13c90d1bc84e1a8858c83ea32c725182106616 | refs/heads/main | 2023-04-05T04:04:26.934540 | 2021-04-11T09:01:31 | 2021-04-11T09:01:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py |
from math import gcd
def gcd_euclid(a, b):
    """Compute gcd(a, b) with the iterative Euclidean algorithm.

    Equivalent to math.gcd for non-negative integers. The original version
    delegated to math.gcd when a < b, which both defeated the point of a
    hand-rolled implementation and was redundant: the first loop iteration
    swaps the operands anyway (a % b == a when a < b).

    >>> gcd_euclid(48, 18)
    6
    """
    while b != 0:
        a, b = b, a % b
    return a
# Exhaustive sanity check against math.gcd for every pair in [0, 500).
# Runs at import time; 250k assertions keep the check thorough but slow.
for a in range(500):
    for b in range(500):
        assert(gcd(a,b) == gcd_euclid(a,b))
"pierre.machine.learning@gmail.com"
] | pierre.machine.learning@gmail.com |
27d32813b7fee47a8f3898e5b10327bb6f1e91ce | 25404f4cfb9be3e6f1b3fe31a1554459eb200813 | /1_todo/string_io_and_json_example.py | 5cb62ee749b5815bcf6dba5c20c390f1ac5608f1 | [] | no_license | nightimero/annal_report_test | 1c6eb4b71482f870c753f5084212afd071929f57 | 7bbc76ba703527ba8f4b84fbdb94fd57b37b9887 | refs/heads/master | 2021-09-06T21:18:59.534963 | 2018-02-11T15:31:21 | 2018-02-11T15:31:21 | 103,259,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | # -*- coding: utf-8 -*-
# NOTE: Python 2 example transcript (print statements, the StringIO module);
# under Python 3 use io.StringIO and print(...). Bare expressions below are
# REPL-style: a call followed by its expected result.
from StringIO import StringIO
import json
# 1. Streaming serialization into a file-like object.
io = StringIO()
json.dump(['streaming API'], io)
io.getvalue()
# '["streaming API"]'
# 2. Use separators for compact encoding (no spaces after ',' and ':').
import json
json.dumps([1, 2, 3, {'4': 5, '6': 7}], separators=(',', ':'))
'[1,2,3,{"4":5,"6":7}]'
# 3. Pretty printing: the indent parameter controls indentation depth.
import json
print json.dumps({'4': 5, '6': 7}, sort_keys=True,
                 indent=4, separators=(',', ': '))
# {
# "4": 5,
# "6": 7
# }
# 4. Decoding JSON:
import json
json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
json.loads('"\\"foo\\bar"')
u'"foo\x08ar'
from StringIO import StringIO
io = StringIO('["streaming API"]')
json.load(io)
[u'streaming API']
# 5. Skipping invalid keys.
# Another useful dumps parameter is skipkeys (default False). When dumps
# serializes a dict, every key must be a str; other key types raise
# TypeError. With skipkeys=True those keys are silently dropped instead.
data = {'b': 789, 'c': 456, (1, 2): 123}
print json.dumps(data, skipkeys=True)
#
# {"c": 456, "b": 789}
"chenxiang@aiknown.com"
] | chenxiang@aiknown.com |
ea8bb3f37fef6e37cd9f9274f22db69548ed5b99 | 1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e | /xcp2k/classes/_program_run_info36.py | df87e8835f3ba808b0a2fb5f2bbb04a979030521 | [] | no_license | Roolthasiva/xcp2k | 66b2f30ebeae1a946b81f71d22f97ea4076e11dc | fc3b5885503c6f6dc549efeb4f89f61c8b6b8242 | refs/heads/master | 2022-12-23T06:03:14.033521 | 2020-10-07T08:01:48 | 2020-10-07T08:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._each343 import _each343
class _program_run_info36(InputSection):
    """Auto-generated wrapper for a CP2K &PROGRAM_RUN_INFO print-key section.

    Attribute names map to CP2K keywords through ``_keywords``; ``EACH`` is
    the single nested subsection.
    """
    def __init__(self):
        InputSection.__init__(self)
        # Value that follows the section name in the CP2K input (print level).
        self.Section_parameters = None
        self.Add_last = None
        self.Common_iteration_levels = None
        self.Filename = None
        self.Log_print_key = None
        self.EACH = _each343()
        self._name = "PROGRAM_RUN_INFO"
        # Python attribute name -> CP2K keyword name.
        self._keywords = {'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Filename': 'FILENAME', 'Log_print_key': 'LOG_PRINT_KEY'}
        self._subsections = {'EACH': 'EACH'}
        self._attributes = ['Section_parameters']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e01b140eb36a9c67eba75192ebe27eb8b1a977f6 | 6f2a8a9d2f11d194fe41762e71ebd7270a22325b | /source/abstract/entities/electronic/controller/controller.py | 889ac5c8eca1c378a0464c9d0484d2aa82609ba9 | [] | no_license | rschum/game | 053da314a276445e03d682c6481a35aa888c5125 | 59ef0461c1ac60e690d39f6c180256f387999e44 | refs/heads/master | 2020-05-23T20:10:57.698939 | 2017-04-20T03:04:31 | 2017-04-20T03:04:31 | 84,785,024 | 0 | 0 | null | 2017-03-13T04:45:46 | 2017-03-13T04:45:46 | null | UTF-8 | Python | false | false | 193 | py | from source.abstract.entities.inanimate.controller import controller
class Controller(controller.Controller):
    """Controller for electronic entities.

    Currently adds no behavior beyond the inanimate base controller.
    """

    def __init__(self):
        controller.Controller.__init__(self)
"Master.Foo.v.1.0.0@gmail.com"
] | Master.Foo.v.1.0.0@gmail.com |
bca0e4a8bfdd5fb61f779d7021faff1f11192a8b | 2792c12fc00de1a4057cbfe9f03c6fdcd9e45e2e | /archive/collectRaws.py | eb4bdb7386efd09957f739c943e24f386c8ebe13 | [] | no_license | tangylyre/EOG | 0b2d4d9ded3087248324e021e00c561f7279cd27 | bec8cdd2a2034272d127045880b230b131fa19ac | refs/heads/master | 2023-05-09T18:29:45.003247 | 2021-04-14T21:45:41 | 2021-04-14T21:45:41 | 326,846,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | from datetime import date
import time
import os
import numpy
import time
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
# from pygame import mixer # Load the popular external library
# import pickle # Rick
# import matplotlib.pyplot as plt
# from scipy import signal
# import scipy
from datetime import date
# BLUETOOTH PACKAGES I MAY NEED TO REMOVE
# sudo apt uninstall bluetooth pi-bluetooth bluez blueman
####################---Importing Data
###---This is for saving data
# with open('pattern.pkl','wb') as f:
# pickle.dump([pattern, data],f)
####################---Reading the GPIO
# Create the SPI bus on the Pi's hardware SPI pins.
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
# create the cs (chip select)
cs = digitalio.DigitalInOut(board.D5)
# create the mcp object
mcp = MCP.MCP3008(spi, cs)
# Analog input channels on MCP3008 pins 0 and 1 (the two EOG electrodes).
chan1 = AnalogIn(mcp, MCP.P0)
chan2 = AnalogIn(mcp, MCP.P1)
# NOTE(review): the log file is never flushed or closed, so buffered samples
# may be lost when the loop is interrupted — consider open(..., buffering=1)
# or wrapping the loop in try/finally with f.close().
f = open('josh.txt', 'a')
f.write("\n begin log for calibration v1")
f.write(str(date.today()))
x = 0
# Sample both channels roughly every 50 ms, forever (stop with Ctrl+C).
while True:
    c1 = chan1.voltage
    c2 = chan2.voltage
    s = ("\n%0.2f\t%0.2f" % (c1, c2))
    f.write(s)
    print(x, end=" ")  # running sample counter as a console heartbeat
    time.sleep(0.05)
    x += 1
| [
"tangylyre@gmail.com"
] | tangylyre@gmail.com |
086a719cbdaad09756a71716cec0332342db36f1 | 8827d6e4a531d4da8ec5567f235bc79551b43a68 | /app/game/component/baseInfo/BaseInfoComponent.py | 3621b625e971a6ec0c217adea0ab9bdd12328097 | [] | no_license | wyjstar/traversing | 0f6454f257e5c66a7295ef1f9a9dca970e8a1bb7 | f32bb9e6d9f4c0d8bcee9ce0aa4923dcfc913cce | refs/heads/master | 2021-01-17T22:44:54.059161 | 2016-04-04T09:54:13 | 2016-04-04T09:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | # -*- coding:utf-8 -*-
"""
created by server on 14-6-10下午4:58.
"""
from app.game.component.Component import Component
class BaseInfoComponent(Component):
    """
    Abstract base-information object: holds an owner's id and basic name.
    """

    def __init__(self, owner, bid, base_name):
        """
        Create the base-information object.

        @param owner: the entity this component belongs to
        @param bid: the owner's id
        @param base_name: the basic (display) name
        """
        Component.__init__(self, owner)
        self._id = bid  # the owner's id
        self._base_name = base_name  # the basic name

    @property
    def id(self):
        # Owner's id (read/write via the matching setter).
        return self._id

    @id.setter
    def id(self, bid):
        self._id = bid

    @property
    def base_name(self):
        # Basic name (read/write via the matching setter).
        return self._base_name

    @base_name.setter
    def base_name(self, base_name):
        self._base_name = base_name
"guanhaihe@mobartsgame.com"
] | guanhaihe@mobartsgame.com |
3b2b4b72c827466af785eb8a9670fc7e4d2bff0d | 06ee5a5d83466896bbfd1653206da0151d6aa81a | /apps/business/serializers/file_serializer.py | ae6dac0452ba845b69a632709ac10c18ac7e31f3 | [] | no_license | fengjy96/rest_task | 201421a40ce42031223f61135d1d5e85809188e6 | db1d7c4eb2d5d229ab54c6d5775f96fc1843716e | refs/heads/master | 2020-07-22T19:48:19.940094 | 2019-09-02T13:40:11 | 2019-09-02T13:40:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | from rest_framework import serializers
from business.models.files import Files
class FilesSerializer(serializers.ModelSerializer):
    """
    File: create / delete / update / query (full CRUD serializer).
    """
    class Meta:
        model = Files
        fields = '__all__'
class FilesListSerializer(serializers.ModelSerializer):
    """
    File list serializer with related objects expanded one level (depth=1).
    (The original docstring said "message" — a copy-paste slip; this
    serializer targets the Files model.)
    """
    class Meta:
        model = Files
        fields = '__all__'
        depth = 1
| [
"onerf@sina.com"
] | onerf@sina.com |
e2e6053a401e98b26f59d1afe6d7fe23d264c972 | 099c4d066035463c2e9b201798a8d3b57357458a | /blog/models.py | c5faa2d6ca6c7cb6d5b41e052ae8ef3291d6929a | [] | no_license | Toluwalemi/Fast-API-Blog | 3c2123b09538f164c12258e0a4ddcfbd6a9680ff | cd62b18b052ae344f8f8e9eed7f60a20d4f3113b | refs/heads/main | 2023-05-06T09:57:47.570888 | 2021-05-27T22:01:06 | 2021-05-27T22:01:06 | 371,513,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from .database import Base
class Blog(Base):
    """SQLAlchemy model for a blog post; each post belongs to one User."""
    __tablename__ = 'blogs'

    id = Column(Integer, primary_key=True, index=True)
    title = Column(String)
    body = Column(String)
    # Author's primary key in the 'users' table.
    user_id = Column(Integer, ForeignKey('users.id'))
    # Many-to-one link to the authoring User; paired with User.blogs.
    creator = relationship("User", back_populates="blogs")
class User(Base):
    """SQLAlchemy model for an application user."""
    __tablename__ = 'users'

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String)
    email = Column(String)
    # NOTE(review): stored as-is here; confirm callers hash passwords
    # before persisting.
    password = Column(String)
    # One-to-many link to the user's posts; paired with Blog.creator.
    blogs = relationship('Blog', back_populates="creator")
| [
"toluwalemisrael@gmail.com"
] | toluwalemisrael@gmail.com |
e3280f9a700f857d1a028607a351ab8ff308aa07 | 2611a6ecda7b36511485439dcecdc11356ea98a6 | /pychemia/dft/codes/vasp/_incar.py | 7511b7cbfd02f3d14806113e4df8c7db27250c4b | [
"MIT"
] | permissive | maksimovica/PyChemia | 169fa9e2c0969d4375b2dddf935440bf35d68519 | 62b8ed06f186d0b40a628d98e7dd985efe3b7581 | refs/heads/master | 2021-01-16T17:51:42.772243 | 2014-06-18T02:18:17 | 2014-06-18T02:18:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,228 | py | """
Routines to read and write INCAR file
"""
__author__ = "Guillermo Avendano-Franco"
__copyright__ = "Copyright 2014"
__version__ = "0.1"
__maintainer__ = "Guillermo Avendano-Franco"
__email__ = "gtux.gaf@gmail.com"
__status__ = "Development"
__date__ = "March 16, 2014"
import os as _os
import numpy as _np
def load_INCAR(path):
    """
    Load the file INCAR in the directory 'path' or
    read directly the file 'path' and return an object
    'inputvars' for pychemia. Returns None (after printing a message)
    when no INCAR can be located.
    """
    if _os.path.isfile(path):
        filename = path
    elif _os.path.isdir(path) and _os.path.isfile(path + '/INCAR'):
        filename = path + '/INCAR'
    else:
        print('INCAR path not found on ', path)
        return

    return InputVariables(filename=filename)
def save_INCAR(iv, path):
    """
    Takes an object inputvars from pychemia and
    save the file INCAR in the directory 'path' or
    save the file 'path' as a VASP INCAR file
    """
    filename = path + '/INCAR' if _os.path.isdir(path) else path
    iv.write(filename)
class InputVariables:
"""
VASP INCAR object
It contains:
data:
variables = Dictionary whose keys are ABINIT variable names
and contains the values as numpy arrays
methods:
write = Write the input into as a text file that ABINIT
can use as an input file
get_value = Get the value of a particular variable
set_value = Set the value of a particular variable
"""
variables = {}
def __init__(self, *args, **kwargs):
filename = None
if 'filename' in kwargs:
filename = kwargs['filename']
if filename is not None and _os.path.isfile(filename):
try:
self.__import_input(filename)
except ValueError:
print('File format not identified')
def __import_input(self, filename):
rf = open(filename, 'r')
for line in rf.readlines():
if '=' in line:
varname = line.split('=')[0].strip()
value = line.split('=')[1].strip()
try:
self.variables[varname] = _np.array([int(value)])
except ValueError:
try:
self.variables[varname] = _np.array([float(value)])
except ValueError:
self.variables[varname] = _np.array([value])
rf.close()
def write(self, filename):
"""
Write an inputvars object into a text
file that VASP can use as an INCAR
file
Args:
filename:
The path to 'INCAR' filename that will be written
"""
wf = open(filename, 'w')
wf.write(self.__str__())
wf.close()
def __str__(self):
ret = ''
thekeys = self.variables.keys()
for i in thekeys:
ret += self.write_key(i)
return ret
def write_key(self, varname):
"""
Receives an input variable and write their contents
properly according with their kind and length
Args:
varname:
The name of the input variable
wf:
The file object where the 'abinit.in' is been written
"""
ret = ''
if len(self.variables[varname]) == 0:
print("[ERROR] input variable: '%s' contains no elements" % varname)
return
# Assume that the variables are integer and test if such assumption
# is true
integer = True
real = False
string = False
compact = True
# Get the general kind of values for the input variable
for j in self.variables[varname]:
try:
if not float(j).is_integer():
# This is the case of non integer values
integer = False
real = True
string = False
if len(str(float(j))) > 7:
compact = False
except ValueError:
# This is the case of '*1' that could not
# be converted because we dont know the size
# of the array
integer = False
real = False
string = True
ret += (varname.ljust(15)) + " = "
for j in range(len(self.variables[varname])):
if real:
if compact:
ret += ("%g" % self.variables[varname][j]).rjust(8)
else:
ret += ("%17.10e" % self.variables[varname][j])
elif integer:
ret += ("%d" % self.variables[varname][j])
elif string:
ret += ("%s" % self.variables[varname][j])
# Conditions to jump to a new line
if ((j + 1) % 3) == 0 and real and j < len(self.variables[varname]) - 1:
ret += ";\n"
ret += 17 * " "
elif j < len(self.variables[varname]) - 1:
ret += " "
ret += ";\n"
return ret
| [
"guilleaf@msn.com"
] | guilleaf@msn.com |
00306de26033af3b73544da9760dde62a5a6bd64 | e1eac39ac5b0b28bcc704a48e4d17602ee6477eb | /news3.py | 73a1ea1da464d2fba6cb20bba96c05c2b9b852e5 | [] | no_license | CMyCode/webscrapepy | 76341f455bf483cdf29382946001697e36f3c0c6 | 6419aa2ddd71b6ff296733d7bea5ba2e7d0413a3 | refs/heads/master | 2020-03-08T23:48:13.940815 | 2018-06-02T01:24:56 | 2018-06-02T01:24:56 | 128,473,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,006 | py | import requests
from bs4 import BeautifulSoup
import re
import sys
import os
import datetime
import pandas as pd
from random import choice
from collections import defaultdict
from nltk.corpus import stopwords
from datetime import datetime, timedelta
from random import *
import string
import nltk
from nltk.tokenize import TreebankWordTokenizer
nltk.download('all')
# Function to generate date in required format
def DateFunctions(request):
    """Return a YYYYMMDD date string: yesterday's when request is 'Y', today's otherwise."""
    base = datetime.now()
    if request == 'Y':
        base -= timedelta(days=1)
    return base.strftime('%Y%m%d')
# Function to generate Full file name in required format
def GenerateFileName(
    path,
    request,
    FileString,
    FileExt,
    ):
    """Build '<path>/<FileString><YYYYMMDD>.<FileExt>' using DateFunctions(request)."""
    stamped_name = '{}{}.{}'.format(FileString, DateFunctions(request), FileExt)
    return os.path.join(path, stamped_name)
# Function to Pull all the links and create a file
# File is used by nextday's process to identify Fresh News on that day
# Function to Pull all the links and create a file
# File is used by nextday's process to identify Fresh News on that day
def CreateLinksFile(file):
    """Scrape tolivelugu.com for article links, write them to ``file``.

    Links mentioning 'eruditesoft' or '/videos/' are filtered out; the rest
    are written one per line. Returns a dict {href: counter} on success.
    NOTE(review): on a non-200 response only 'Error' is printed and the
    function implicitly returns None — callers must handle that.
    """
    AllLinks = {}
    Cnt = 1
    url = 'http://tolivelugu.com/'
    resp = requests.get(url)
    if resp.status_code == 200:
        print( 'Successfully opened the web page:--> {}'.format(url))
        # print 'The news Links are below :-\n'
        soup = BeautifulSoup(resp.text, 'html.parser')
        for j in set(soup.find_all('a', href=True, title=True)):
            # Skip sponsor links and video pages.
            if re.search('eruditesoft', str(j)) or re.search('/videos/'
                , str(j)):
                pass
            else:
                AllLinks[j['href']] = Cnt
                Cnt += 1
        with open(file, 'w') as f:
            for key in AllLinks.keys():
                f.write(key + '\n')
        return AllLinks
    else:
        print ('Error')
# Function to identify Fresh news as on that day
def CompareAndGenDiff(CurrLinks, PrevFile, Dataset):
    """Return the links in CurrLinks that are new relative to PrevFile.

    Args:
        CurrLinks (dict): today's links (keys are the URLs).
        PrevFile (str): path to the previous day's link file, one URL per line.
        Dataset (str): 'ALL' forces returning every current link.

    Returns:
        list: fresh link URLs; all current links when PrevFile is missing
        or Dataset == 'ALL'.
    """
    if os.path.exists(PrevFile) and Dataset != 'ALL':
        with open(PrevFile, 'r') as rf:
            # Strip newlines so the comparison doesn't depend on whether the
            # file's last line is newline-terminated.
            prev_links = set(line.strip() for line in rf)
        # Local accumulator: the original appended to a module-level
        # 'NewLinks' list, so repeated calls accumulated stale entries.
        return [link for link in CurrLinks.keys() if link not in prev_links]
    else:
        print('Either previous day file not present or dataset was set to ALL')
        return list(CurrLinks.keys())
def GetWordCount2(data):
    """Tokenize lines of text and tally word frequencies plus POS classes.

    Args:
        data (iterable): lines of text; bare '\n' entries are skipped.

    Returns:
        dict: {'WORDS': [...], 'COUNTS': [...], 'POS': [...]} — three lists
        aligned by index, sorted by word, with English stop words removed.
    """
    tokenizer = TreebankWordTokenizer()
    stop_words = set(stopwords.words('english'))
    wordcount = defaultdict(int)
    pos_vals = {}

    for line in data:
        if line == '\n':
            continue
        tokens = tokenizer.tokenize(line)
        for token in set(tokens):
            wordcount[token] = wordcount[token] + tokens.count(token)

    # Iterate over a snapshot of the keys: deleting from the dict while
    # iterating its live .items() view raises RuntimeError on Python 3.
    for word in list(wordcount.keys()):
        if word.lower() in stop_words:
            del wordcount[word]
        else:
            pos_vals[word] = PosTags(word)

    ordered = sorted(wordcount.keys())
    return {'WORDS': ordered,
            'COUNTS': [wordcount[w] for w in ordered],
            'POS': [pos_vals[w] for w in ordered]}
# Function to read content from the link provided
# Function to read content from the link provided
def ReadNews(link):
    """Fetch an article page and return the text of its first 'desc_row' div.

    Returns a list of text fragments (BeautifulSoup NavigableStrings).
    NOTE(review): implicitly returns None on a non-200 response, when every
    desc_row matches 'eruditesoft', or when no desc_row exists — callers
    (e.g. GetWordCount2) must be prepared for that.
    """
    lresp = requests.get(link)
    if lresp.status_code == 200:
        print ('Successfully opened the web page:-->{}'.format(link))
        #print 'Content Below:-\n'
        Csoup = BeautifulSoup(lresp.text, 'html.parser')
        # Csoup=Csoup.encode('utf-8')
        # txtFile=str(filename)+'.txt'
        # fpath = os.path.join(path, txtFile)
        # f = open(fpath, 'w')
        # f.write((Csoup.prettify()))
        # f.close()
        # print link
        for j in Csoup.find_all('div', attrs={'class': 'desc_row'}):
            # Skip sponsor blocks; return the first real article body.
            if re.search('eruditesoft', str(j)):
                pass
            else:
                text = j.find_all(text=True)
                return text # .encode('utf-8')
    else:
        print ('Error')
# Function to Generate WORD COUNT exclduing stop words
def GetWordCount(data):
    """Count word frequencies across lines of text, excluding English stop words.

    Args:
        data (iterable): lines of text; bare '\n' entries are skipped.

    Returns:
        defaultdict(int): word -> occurrence count (missing words read as 0).
    """
    stop_words = set(stopwords.words('english'))
    wordcount = defaultdict(int)

    for line in data:
        if line == '\n':
            continue
        # One pass per line: the original iterated set(words) and called
        # words.count(j) for each distinct token (O(n * k)), and also kept
        # an unused 'words = []' initialization.
        for word in line.split(' '):
            wordcount[word] += 1

    # Snapshot the keys before deleting stop words.
    for word in list(wordcount.keys()):
        if word.lower() in stop_words:
            del wordcount[word]

    return wordcount
def GetExcelSheetName(url):
    """Derive an Excel sheet name from the second-to-last URL path segment.

    Hyphen-separated words of the article slug are concatenated until the
    accumulated name reaches 15 characters, in which case its first 12
    characters are returned; shorter names are returned whole. (Restructured
    so every segment is considered — the committed if/else layout could
    return after processing only the first segment.)
    """
    slug = url.split('/')[-2]
    sheet_name = ''
    for part in slug.split('-'):
        sheet_name += part
        if len(sheet_name) >= 15:
            return sheet_name[0:12]
    return sheet_name
def RandomTextGen():
    """Return three random uppercase ASCII letters (sheet-name uniqueness suffix).

    The original bound string.ascii_uppercase to a local named ``str``,
    shadowing the builtin.
    """
    alphabet = string.ascii_uppercase
    return ''.join(choice(alphabet) for _ in range(3))
def PosTags(word):
    """Classify a single word into a coarse part-of-speech bucket via nltk.

    Returns:
        str: 'NOUN', 'PRONOUN', 'VERB', 'ADJECTIVE', 'ADVERB' or 'OTHERS'
        (punctuation and unrecognised tags).
    """
    # Single punctuation characters are not tagged.
    if word in set(string.punctuation):
        return 'OTHERS'

    tag = nltk.pos_tag([word])[0][1]

    # Penn Treebank tag families. Note: possessive forms are 'WP$'/'PRP$'
    # (the original compared against 'WPS'/'PRPS', which never match), and
    # 'VBGN' is not a Penn Treebank tag.
    tag_groups = (
        ('NOUN', {'NN', 'NNP', 'NNS', 'NNPS'}),
        ('PRONOUN', {'WP', 'WP$', 'PRP', 'PRP$'}),
        ('VERB', {'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'}),
        ('ADJECTIVE', {'JJ', 'JJR', 'JJS'}),
        ('ADVERB', {'RB', 'RBR', 'RBS', 'WRB'}),
    )
    for label, tags in tag_groups:
        if tag in tags:
            return label
    return 'OTHERS'
# Program starts in here
path = os.getcwd()
#path='D:\python\News' #edit this with the path you need
Dataset='DELTA' # change this to 'ALL' if you want every link, not just new ones
# Today's/yesterday's link files and today's per-article word-count workbook.
CurrFileName = GenerateFileName(path, 'T', 'NEWS', 'TXT')
PrevFileName = GenerateFileName(path, 'Y', 'NEWS', 'TXT')
CountsFileName = GenerateFileName(path, 'T', 'Counts_', 'xls')
NewLinks = []
TodaysNewsALL = CreateLinksFile(CurrFileName)
TodaysNewsLatest = CompareAndGenDiff(TodaysNewsALL, PrevFileName,Dataset)
# One workbook tab per fresh article: URL header row, then word/count/POS table.
# NOTE(review): ExcelWriter.save() is deprecated in newer pandas — confirm
# the installed version; close() is the modern equivalent.
ExWriter = pd.ExcelWriter(CountsFileName)
for todaysnews in TodaysNewsLatest:
    Content = ReadNews(todaysnews)
    WorndsNCountsNPOS = GetWordCount2(Content)
    # Random suffix keeps sheet names unique across similar slugs.
    SheetName = GetExcelSheetName(todaysnews)+RandomTextGen()
    Header = ({'NewsURL':[todaysnews]})
    df_Header = pd.DataFrame(Header)
    df_Header.to_excel(ExWriter,SheetName, index=False)
    df_POSData=pd.DataFrame(WorndsNCountsNPOS)
    df_POSData.to_excel(ExWriter,SheetName, index=False,startrow=3)
    print('TAB :-->{} Created'.format(SheetName))
ExWriter.save()
| [
"balaji.meka1@t-mobile.com"
] | balaji.meka1@t-mobile.com |
e6df5acf08fd8bbbf178c132ccb74291ca5e33b8 | 10d2ff5992d0d5d5dfc25350c56b030f4b669f29 | /driven_by_mysql/driven_by_mysql.py | a442adc26688af65e4860544f9d412ee3dcf215c | [] | no_license | hongzhiguang/DataDriver | 5ca42e92d3b725fc34651e01ba59cf88fae61749 | ca997910000a131a0beb420a516071f8ba700552 | refs/heads/master | 2020-06-03T01:58:49.861415 | 2019-06-11T14:26:07 | 2019-06-11T14:26:07 | 191,387,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,913 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import ddt
import unittest
import time
import logging
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from Config import *
from ReadData import *
# Append run details to report.txt.
logging.basicConfig(
    level=logging.INFO,
    # Bug fix: the format previously began with "%%(asctime)s", which the
    # %-style formatter renders as the literal text "%(asctime)s" instead of
    # the record's timestamp.
    format="%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s",
    datefmt='%a, %Y-%m-%d %H:%M:%S',
    filename="report.txt",
    filemode="a+"
)
# Load (test input, expected output) pairs from the configured MySQL table.
# QueryData and the connection settings come from the star imports of
# Config / ReadData above.
db_data = QueryData(host=host,user=user,password=password,port=port,charset=charset)
db_data.select_db(database)
row_num = db_data.count_num()
data = []
for i in range(1,row_num+1):
    test_data = db_data.get_one(table, i, test_data_col_no)
    expect_data = db_data.get_one(table, i, expect_data_col_no)
    data.append((test_data,expect_data))
@ddt.ddt
class DrivenBySql(unittest.TestCase):
    """Data-driven Baidu search test: one case per (input, expected) DB row."""

    def setUp(self):
        # Fresh Chrome session for every test case.
        self.driver = webdriver.Chrome(executable_path="c:\chromedriver")

    @ddt.data(*data)
    def test_by_sql(self, data):
        # Each case is a (search text, expected page content) pair.
        search_text, expected_text = data
        print(search_text, expected_text)
        self.driver.get("https://www.baidu.com")
        self.driver.implicitly_wait(10)
        try:
            # Type the query, submit, and wait for the results to load.
            self.driver.find_element_by_id("kw").send_keys(search_text)
            self.driver.find_element_by_id("su").click()
            time.sleep(5)
            self.assertTrue(expected_text in self.driver.page_source)
        except NoSuchElementException:
            logging.error("页面找不到!")
        except AssertionError:
            # Record the failed assertion back into the results table.
            logging.info("断言失败!")
            db_data.insert_res(table,"断言失败",search_text)
        except Exception:
            logging.error("其他错误!")
        else:
            # No exception: record the case as a success.
            db_data.insert_res(table,"成功",search_text)

    def tearDown(self):
        self.driver.quit()
if __name__ == "__main__":
    # Run the data-driven suite when executed as a script.
    unittest.main()
db_data.close() | [
"1583297821@qq.com"
] | 1583297821@qq.com |
71fd2309a48331c48fd5788811c68992705cdd2a | b57a5f2613d626c96beab02966f2075848cb1d8f | /Linear Regression.py | 6ebab678842e0131d9e3c280ecda3ae18ce3d147 | [] | no_license | rishabds/Machine-Learning- | e29e9917fa883c5756442b2d358029dd0ac8b3be | f966bffa1012787f77fc37ac03c5f124202601c6 | refs/heads/master | 2020-08-01T18:21:38.855225 | 2019-09-26T11:48:16 | 2019-09-26T11:48:16 | 211,074,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | import numpy as np
import matplotlib.pyplot as plt
def estimate_coef(x, y):
    """Fit a simple least-squares line y = b0 + b1*x.

    :param x: 1-D array-like of explanatory values
    :param y: 1-D array-like of response values
    :return: tuple (intercept b0, slope b1)
    """
    num_points = np.size(x)
    mean_x = np.mean(x)
    mean_y = np.mean(y)
    # Cross-deviation of x and y, and deviation of x about its mean.
    cross_dev = np.sum(y * x) - num_points * mean_y * mean_x
    x_dev = np.sum(x * x) - num_points * mean_x * mean_x
    # Closed-form slope and intercept of the least-squares line.
    slope = cross_dev / x_dev
    intercept = mean_y - slope * mean_x
    return (intercept, slope)
def plot_regression_line(x, y, b):
    """Scatter the observations and overlay the fitted line b[0] + b[1]*x."""
    # Observed points as a magenta scatter plot.
    plt.scatter(x, y, color="m", marker="o", s=30)
    # Fitted values along the regression line.
    fitted = b[0] + b[1] * x
    plt.plot(x, fitted, color="b")
    # Axis labels.
    plt.xlabel('x')
    plt.ylabel('y')
    # Render the figure.
    plt.show()
def main():
    """Fit a line to the sample data, report the coefficients, and plot them."""
    # Sample observations.
    xs = np.array([25, 17, 12, 8, 3, 15, 37, 20, 4])
    ys = np.array([8, 14, 26, 35, 45, 18, 4, 15, 40])
    coeffs = estimate_coef(xs, ys)
    print("Estimated coefficients:\nb_0 = {}\nb_1 = {}".format(coeffs[0], coeffs[1]))
    plot_regression_line(xs, ys, coeffs)
if __name__ == "__main__":
main() | [
"rishabdussoye@hotmail.com"
] | rishabdussoye@hotmail.com |
b1ac9099c36ddeeab3548464dd1b5d5e9b1ee687 | 84d2040faf1acaabedce67e884b55767b6b98e57 | /source/watches/migrations/0003_auto_20210305_1130.py | e955040939fd33e381c347577ff1f00f4c1035ee | [] | no_license | UuljanAitnazarova/watches_shop | 3adae63141107c91ae6a489dddeb8f8fa9433666 | 6f54b11d468957cf05275c37b17f4c2e669e9fc2 | refs/heads/master | 2023-05-08T11:51:25.597190 | 2021-05-27T12:48:46 | 2021-05-27T12:48:46 | 344,481,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # Generated by Django 3.1.7 on 2021-03-05 11:30
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: enforce a non-negative stock count on Product."""

    dependencies = [
        ('watches', '0002_auto_20210304_1426'),
    ]

    operations = [
        # Adds MinValueValidator(0) so product_availability cannot go negative.
        migrations.AlterField(
            model_name='product',
            name='product_availability',
            field=models.IntegerField(validators=[django.core.validators.MinValueValidator(0)], verbose_name='Остаток'),
        ),
    ]
| [
"u.aitnazarova@gmail.com"
] | u.aitnazarova@gmail.com |
11247c56107695e84821a8412a5d43b66542c9fc | a5d0a0499dd069c555080c8cefc2434304afead4 | /Programmers/pipe.py | bfa9ff3f16b5e878de473bd4fbe430f11b47ebcb | [] | no_license | devjinius/algorithm | 9bdf9afc021249b188d6930cf9d71f9147325d9f | 007fa6346a19868fbbc05eefd50848babb5f1cca | refs/heads/master | 2020-05-04T06:08:32.827207 | 2019-07-31T02:39:39 | 2019-07-31T02:39:39 | 178,999,456 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # 프로그래머스 쇠막대기
# https://programmers.co.kr/learn/courses/30/lessons/42585
def solution(arrangement):
    """Count the pieces of iron bar produced by the laser cuts.

    In *arrangement*, '(' immediately followed by ')' is a laser pulse; any
    other ')' is the right end of a bar. A laser adds one piece per bar that
    is currently open; a bar end adds one piece for its trailing fragment.
    """
    opened = []
    pieces = 0
    prev = ''
    for ch in arrangement:
        if ch != ')':
            opened.append(ch)
        elif prev == '(':
            # Laser pulse: it cuts every bar still open above it.
            opened.pop()
            pieces += len(opened)
        else:
            # A bar ends here, contributing its last fragment.
            opened.pop()
            pieces += 1
        prev = ch
    return pieces
| [
"eugenekang94@gmail.com"
] | eugenekang94@gmail.com |
54b812bf117a999b6cb8c36a01995b5107aa8ea4 | 0db729c520410c95139589098f303f1de30b99f5 | /radio/programs/migrations/0002_event.py | 747a22373c73b67b6534532496f3897f70895668 | [] | no_license | deisaack/radio | b76cca194f6a02ca27dea5f8b24925611016545c | e6abf60f4462e88ff5b8f88b024093ac3067c947 | refs/heads/master | 2021-06-22T12:44:20.452555 | 2017-08-23T06:19:22 | 2017-08-23T06:19:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-30 13:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: create the Event model for the programs app."""

    dependencies = [
        ('profiles', '0001_initial'),
        ('programs', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(blank=True, null=True)),
                ('title', models.CharField(max_length=250)),
                ('description', models.TextField(default='')),
                # Audit timestamps, set automatically on insert/update.
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('presenters', models.ManyToManyField(to='profiles.Staff')),
            ],
        ),
    ]
| [
"pkemey@gmail.com"
] | pkemey@gmail.com |
fbdcda5ade7f09825d2bccbf2d035d00975e7893 | 6861adad57bdd1fd357829e3f422081ac4a25c60 | /S7_CIFAR10_Modular/models/S7_Model.py | 7afee0cfe07dc2fdb10f3556b3846715bae7bac7 | [] | no_license | anilbhatt1/Deep_Learning_EVA4_Phase1 | 21373061cd5ec318d2eaa8d615262d7340a59472 | b1a0bb5c55de087a3dd065648cd8690b8f22a912 | refs/heads/master | 2020-12-27T06:53:59.858580 | 2020-04-25T15:12:54 | 2020-04-25T15:12:54 | 237,800,803 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,544 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
# Dropout probability applied after every convolution block (5%).
dropout_value = 0.05
class Net(nn.Module):
    """CIFAR-10 CNN mixing dilated, depthwise-separable and 1x1 convolutions.

    Input is expected to be 3x32x32; output is log-probabilities over the
    10 classes. The inline comments track spatial size and receptive field
    (RF) per stage. NOTE(review): some of the RF annotations look stale --
    confirm before relying on them.
    """
    def __init__(self):
        """Build the layer stack; channel/kernel choices are fixed."""
        super(Net, self).__init__()
        # CONVOLUTION BLOCK 1
        self.convblock1A = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.Dropout(dropout_value)
        ) # in = 32x32x3 , out = 32x32x32, RF = 3
        self.dilated1B = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding=2, bias=False, dilation=2),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Dropout(dropout_value)
        ) # in = 32x32x32 , out = 32x32x64, RF = 7
        # TRANSITION BLOCK 1
        self.pool1 = nn.MaxPool2d(2, 2) # in = 32x32x64 , out = 16x16x64, RF = 8
        self.tran1 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(1, 1), padding=0, bias=False)
        ) # in = 16x16x64 , out = 16x16x32, RF = 6
        # CONVOLUTION BLOCK 2
        self.convblock2A = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Dropout(dropout_value)
        ) # in = 16x16x32 , out = 16x16x64, RF = 12
        # Depthwise (groups=64) + pointwise pair = separable convolution.
        self.depthwise2B = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), padding=1, bias=False, groups=64),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Dropout(dropout_value)
        ) # in = 16x16x1x64 , out = 16x16x64, RF = 16
        self.pointwise2C = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(1, 1), padding=0, bias=False)
        ) # in = 16x16x64 , out = 16x16x128, RF = 16
        # TRANSITION BLOCK 2
        self.pool2 = nn.MaxPool2d(2, 2) # in = 16x16x128 , out = 8x8x128, RF = 18
        self.tran2 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=32, kernel_size=(1, 1), padding=0, bias=False)
        ) # in = 8x8x128 , out = 8x8x32, RF = 18
        # CONVOLUTION BLOCK 3
        self.convblock3A = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Dropout(dropout_value)
        ) # in = 8x8x32 , out = 8x8x64, RF = 26
        # TRANSITION BLOCK 3
        self.pool3 = nn.MaxPool2d(2, 2) # in = 8x8x64 , out = 4x4x64, RF = 30
        # NOTE(review): padding=1 on this 1x1 conv makes the output 6x6, not
        # 4x4 as the comment says; the 4x4 AvgPool below still yields 1x1 but
        # averages only the top-left 4x4 window -- confirm this is intended.
        self.tran3 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(1, 1), padding=1, bias=False)
        ) # in = 4x4x64 , out = 4x4x32, RF = 30
        # OUTPUT BLOCK
        self.Gap1 = nn.Sequential(
            nn.AvgPool2d(kernel_size=4)
        ) # in = 4x4x32 , out = 1x1x32, RF = 54
        self.fc1 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=10, kernel_size=(1, 1), padding=0, bias=False)
        ) # in = 1x1x32 , out = 1x1x10, RF = 54
    def forward(self, x):
        """Run the stack and return per-class log-probabilities (N, 10)."""
        x = self.dilated1B(self.convblock1A(x))
        x = self.tran1(self.pool1(x))
        x = self.pointwise2C(self.depthwise2B(self.convblock2A(x)))
        x = self.tran2(self.pool2(x))
        x = self.convblock3A(x)
        x = self.tran3(self.pool3(x))
        x = self.fc1(self.Gap1(x))
        # Flatten the 1x1x10 map to a (batch, 10) logits tensor.
        x = x.view(-1, 10)
        return F.log_softmax(x, dim=-1)
| [
"noreply@github.com"
] | noreply@github.com |
32f2f00bef6097a79694d585d991c08882b2179c | 02f3cf8d895ef167be50a4865bb03210f5ab4c4b | /setup.py | dd93efa4948162a15dba1a29b316b3eb817c94fc | [] | no_license | cy-sohn/my-vot-toolkit-python-master | 67a7d51f70b10d6622d32adb2a1b35d2177fd23b | e1e78c0064e8ad33559a6a72d35e2eb9b8395f62 | refs/heads/master | 2022-12-02T10:35:54.695104 | 2020-08-07T06:36:34 | 2020-08-07T06:36:34 | 285,754,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | #!/usr/bin/env python
from os.path import join, dirname, abspath, isfile
from distutils.core import setup
this_directory = abspath(dirname(__file__))
# Long description for PyPI comes straight from the README.
with open(join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Runtime dependencies are read from requirements.txt when present.
install_requires = []
if isfile(join(this_directory, "requirements.txt")):
    with open(join(this_directory, "requirements.txt"), encoding='utf-8') as f:
        install_requires = f.readlines()
# Fallback version; the exec below overwrites __version__ from vot/version.py.
# NOTE(review): exec on a file is a common versioning idiom but runs arbitrary
# code -- acceptable only because version.py ships with this package.
__version__ = "0.0.0"
exec(open(join(dirname(__file__), 'vot', 'version.py')).read())
setup(name='vot-toolkit',
      version=__version__,
      description='Perform visual object tracking experiments and analyze results',
      long_description=long_description,
      long_description_content_type='text/markdown',
      author='Luka Cehovin Zajc',
      author_email='luka.cehovin@gmail.com',
      url='https://github.com/votchallenge/toolkit',
      packages=['vot', 'vot.analysis', 'vot.dataset', 'vot.experiment', 'vot.region', 'vot.stack', 'vot.tracker', 'vot.utilities'],
      install_requires=install_requires,
      include_package_data=True,
      classifiers=[
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
          "Operating System :: OS Independent",
          "Development Status :: 4 - Beta",
          "Intended Audience :: Science/Research",
      ],
      python_requires='>=3.6',
      # Installs the 'vot' console command.
      entry_points={
          'console_scripts': ['vot=vot.utilities.cli:main'],
      },
      )
| [
"64947778+cy-sohn@users.noreply.github.com"
] | 64947778+cy-sohn@users.noreply.github.com |
ed283187764b5f7ce7a792ed6445b60d8b759828 | a066ef198ddd1ce9a77bb5cf4495f5f937dc642e | /2017/Day 17/Day17_Part1.py | a56396e29719fe398fd30eac1ed174141dd651ea | [] | no_license | olber027/AdventOfCode2017 | 1719860d7410a04ca356b0de921b45357448b426 | c664100bab9ab31af231404e6b5cc5bc51fc9cf8 | refs/heads/master | 2021-05-14T09:10:16.016241 | 2019-01-02T20:44:12 | 2019-01-02T20:44:12 | 116,319,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | class Node:
def __init__(self, value, next):
self.value = value
self.next = next
def insert(self, value):
node = Node(value, self.next)
self.next = node
    def getNext(self):
        """Return the node that follows this one (None at the tail)."""
        return self.next
def __repr__(self):
return "{0}".format(self.value)
# Advent of Code 2017 day 17 part 1: spinlock simulation over a linked list.
file = open("InputFiles/Day17.dat")  # NOTE(review): never closed; 'file' also shadows a builtin on py2
numSteps = int(file.readline().strip())
# A singly linked list stands in for the circular buffer; traversal wraps
# back to 'head' manually when it runs off the end.
head = Node(0, None)
currentNode = head
count = 1
for _ in range(2017):
    # Step forward numSteps positions (with manual wrap-around)...
    for i in range(numSteps):
        if currentNode.getNext() is None:
            currentNode = head
        else:
            currentNode = currentNode.getNext()
    # ...then insert the next value and move onto it.
    currentNode.insert(count)
    currentNode = currentNode.getNext()
    count += 1
# Value immediately after the final insertion (2017).
# NOTE(review): if 2017 lands at the very end of the list this prints None
# instead of wrapping to the head -- confirm against the puzzle input.
print(currentNode.getNext())
"olber027@gmail.com"
] | olber027@gmail.com |
aa3d71e2ea62b064980ca47bda05ad3822b8f0a9 | ab13952a85e5d58fec5aadb5d978b4f1f822035d | /tests/test_api_functions.py | f1551ec62ed5270bcc53b845b198e123affefd33 | [
"MIT"
] | permissive | edison12a/portal_glance_api | da62e5a60e2608ccaf555c36f6300f32f8d8df22 | 76692df23ee5e8d05169e0e2af6b8ddbaee75b01 | refs/heads/master | 2023-05-27T14:38:51.749470 | 2019-01-17T19:31:29 | 2019-01-17T19:31:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,605 | py | """
This module contains all tests for glance_api.modules.functions.py
"""
import os
import pytest
import requests
import sqlalchemy
from glance_api.modules import functions
from glance_api.modules import models
from glance_api import api
# TODO: Finish testing Item
# TODO: Currently using sqlite3 database for tests, need to use postgres instead
# TODO: Figure out how to make test database in postgres programmically.
@pytest.fixture(scope='session')
def connection(request):
    """Session-wide SQLAlchemy connection bound to an on-disk SQLite test DB.

    Creates all model tables up front, points the app's scoped session at
    this connection, and registers drop_all as session-scope teardown.
    """
    db_name = 'sqlite_test_database.db'
    engine = sqlalchemy.create_engine(f'sqlite:///tests/{db_name}')
    models.Base.metadata.create_all(engine)
    connection = engine.connect()
    api.session.registry.clear()
    api.session.configure(bind=connection)
    models.Base.metadata.bind = engine
    request.addfinalizer(models.Base.metadata.drop_all)
    return connection
@pytest.fixture
def db_session(request, connection):
    """Per-test session: each test runs in a transaction rolled back on exit."""
    trans = connection.begin()
    request.addfinalizer(trans.rollback)
    from glance_api.api import session
    return session
def test_Item_with_no_session():
    # Item requires a session argument; omitting it must raise TypeError.
    with pytest.raises(TypeError):
        functions.Item()
def test_Item_tags_from_queries_returns_type_list(db_session):
    # _tags_from_queries always returns a list, even when nothing matches.
    test_data = {'filter': 'image', 'filter_people': None, 'query': 'animal'}
    test_method = functions.Item(db_session)._tags_from_queries(test_data)
    assert type(test_method) == list
def test_Item_tags_from_queries_no_tags(db_session):
    # Querying a tag name that does not exist yields an empty list.
    test_data = {'filter': 'image', 'filter_people': None, 'query': 'TEST_TAGS'}
    test_method = functions.Item(db_session)._tags_from_queries(test_data)
    assert len(test_method) == 0
def test_Item_tags_from_queries_tags(db_session):
    # An empty query with tags present returns all stored tags.
    test_query = {'filter': 'image', 'filter_people': None, 'query': ''}
    test_tags = ['TEST_TAG_ONE', 'TEST_TAG_TWO', 'TEST_TAG_THREE']
    for tag in test_tags:
        new_tag = models.Tag(name=tag)
        db_session.add(new_tag)
    test_method = functions.Item(db_session)._tags_from_queries(test_query)
    assert len(test_method) == 3
def test_Item_tags_from_queries_query(db_session):
    # Only the tags named in the query string should match (2 of 4 here).
    test_query = {'filter': '', 'filter_people': None, 'query': 'querytag notfoundtag'}
    test_tags = ['_one', '_group', 'querytag', 'notfoundtag']
    for tag in test_tags:
        new_tag = models.Tag(name=tag)
        db_session.add(new_tag)
    test_method = functions.Item(db_session)._tags_from_queries(test_query)
    assert len(test_method) == 2
def test_Item_tags_from_queries_filter_people(db_session):
    # The people filter selects tags from filter_people, not from the query.
    test_query = {'filter': 'people', 'filter_people': '_one _group', 'query': 'none'}
    test_tags = ['_one', '_group', 'querytag', 'notfoundtag']
    for tag in test_tags:
        new_tag = models.Tag(name=tag)
        db_session.add(new_tag)
    test_method = functions.Item(db_session)._tags_from_queries(test_query)
    assert len(test_method) == 2
def test_Item_tags_from_queries_filter_people_and_query(db_session):
    # People filter and query string combine (2 people tags + 1 query tag).
    test_query = {'filter': 'people', 'filter_people': '_one _group', 'query': 'querytag'}
    test_tags = ['_one', '_group', 'querytag', 'notfoundtag']
    for tag in test_tags:
        new_tag = models.Tag(name=tag)
        db_session.add(new_tag)
    test_method = functions.Item(db_session)._tags_from_queries(test_query)
    assert len(test_method) == 3
def test_Item_filter_tags_returns_list(db_session):
    # _filter_tags always returns a list.
    test_query = {'filter': 'image', 'filter_people': None, 'query': ''}
    test_tags = ['TEST_TAG_ONE', 'TEST_TAG_TWO', 'TEST_TAG_THREE']
    for tag in test_tags:
        new_tag = models.Tag(name=tag)
        db_session.add(new_tag)
    tags = db_session.query(models.Tag).all()
    test_method = functions.Item(db_session)._filter_tags(test_query, tags)
    assert type(test_method) == list
def test_Item_filter_tags_image_has_tags(db_session):
    # Only tags actually attached to an image survive the 'image' filter.
    test_tags = ['TEST_TAG_ONE', 'TEST_TAG_TWO', 'TEST_TAG_THREE']
    test_query = {'filter': 'image', 'filter_people': None, 'query': ' '.join(test_tags)}
    new_image = models.Image(name='test')
    for tag in test_tags:
        new_tag = models.Tag(name=tag)
        db_session.add(new_tag)
    get_tag = db_session.query(models.Tag).filter_by(name=test_tags[0]).first()
    new_image.tags.append(get_tag)
    db_session.add(new_image)
    test_new_tag = db_session.query(models.Tag).all()
    test_method = functions.Item(db_session)._filter_tags(test_query, test_new_tag)
    assert len(test_method) == 1
def test_Item_filter_tags_image_has_no_tags(db_session):
    # An image without tags contributes nothing under the 'image' filter.
    test_tags = ['TEST_TAG_ONE', 'TEST_TAG_TWO', 'TEST_TAG_THREE']
    test_query = {'filter': 'image', 'filter_people': None, 'query': ' '.join(test_tags)}
    new_image = models.Image(name='test')
    db_session.add(new_image)
    for tag in test_tags:
        new_tag = models.Tag(name=tag)
        db_session.add(new_tag)
    test_new_tag = db_session.query(models.Tag).all()
    test_method = functions.Item(db_session)._filter_tags(test_query, test_new_tag)
    assert len(test_method) == 0
def test_Item_filter_tags_no_filter(db_session):
    # With no filter, tags attached to any item type (image + footage) count.
    test_tags = ['TEST_TAG_ONE', 'TEST_TAG_TWO', 'TEST_TAG_THREE']
    test_query = {'filter': None, 'filter_people': None, 'query': ' '.join(test_tags)}
    new_image = models.Image(name='test')
    new_footage = models.Footage(name='test')
    db_session.add(new_image)
    db_session.add(new_footage)
    for tag in test_tags:
        new_tag = models.Tag(name=tag)
        db_session.add(new_tag)
    get_tag_one = db_session.query(models.Tag).filter_by(name=test_tags[0]).first()
    get_tag_two = db_session.query(models.Tag).filter_by(name=test_tags[0]).first()
    new_image.tags.append(get_tag_one)
    new_footage.tags.append(get_tag_two)
    test_new_tag = db_session.query(models.Tag).all()
    test_method = functions.Item(db_session)._filter_tags(test_query, test_new_tag)
    assert len(test_method) == 2
def test_Item_get_id_does_not_exists(db_session):
    """Item.get with an id absent from the database returns None."""
    test_data = {'id': 999, 'query': None, 'filter_people': None}
    test_method = functions.Item(db_session).get(id=test_data['id'])
    # Idiom fix: compare against None with 'is', not '==' (PEP 8 / E711).
    assert test_method is None
# TODO: Figure out how to make a test database in postgres programically
# for the following tests.
"""
def test_Item_get_id_does_exists(db_session):
test_data = {'id': 1, 'query': None, 'filter_people': None}
new_item = models.Item(type='image')
db_session.add(new_item)
test_method = functions.Item(db_session).get(id=test_data['id'])
assert test_method == True
def test_Item_delete():
pass
def test_Item_post():
pass
def test_Item_patch():
pass
""" | [
"rory.jarrel@gmail.com"
] | rory.jarrel@gmail.com |
d72e7e11bde474750297d7c0e0c5be87fe00bcb8 | 01b0227ed8e8955491e4465d8ebbda9e835d83f4 | /python/prueba_completa.py | 73eb094a6ca028fc16c6a7e78a3692c6d81151df | [] | no_license | LeoBrasileo/QR-Assistance | 4ab0b3e87c9f5683cf2f06ee8d46fb5656329af2 | 10ae308baf841bb0e5ad8432a1c150c9c95e1b95 | refs/heads/master | 2023-01-24T17:04:58.133133 | 2019-11-19T13:44:22 | 2019-11-19T13:44:22 | 194,198,054 | 2 | 0 | null | 2023-01-09T11:53:34 | 2019-06-28T03:11:44 | Java | UTF-8 | Python | false | false | 1,078 | py | import schedule
import time
import pyrebase
import random
import qrcode
import numpy as np
import cv2
# Firebase configuration and initialisation.
# SECURITY NOTE(review): the API key and project URLs are committed in source;
# consider loading them from the environment instead.
config = {
    "apiKey": "AIzaSyAbMl8vM1IHJj6ygDad_TSg4b8daYQVXJA",
    "authDomain": "ususarios-3b9a8.firebaseapp.com",
    "databaseURL": "https://ususarios-3b9a8.firebaseio.com",
    "storageBucket": "ususarios-3b9a8.appspot.com"
}
firebase = pyrebase.initialize_app(config)
db = firebase.database ()  # realtime-database handle used by job()
# Scheduler callback.
def job():
    """Publish a fresh random code to Firebase and display its QR image."""
    print("Updating Firebase...")
    # Random message in [1, 392022).
    message = str(random.randrange(1, 392022))
    # Upload the message (as a string) to Firebase.
    db.child("qrs").set({"numeros": message})
    # Build the QR code for the message and save it to disk.
    filename = 'QR_' + message + '.jpg'
    qr_image = qrcode.make(message)
    print("Nombre del archivo: " + filename)
    print("Mensaje: " + message)
    print("")
    qr_image.save(filename)
    # Show the freshly generated QR in an OpenCV window.
    frame = cv2.imread(filename)
    cv2.imshow('image', frame)
    pressed = cv2.waitKey(1)
#----------------------------------------------------------
# Re-publish a new code every 3 seconds, forever.
schedule.every(3).seconds.do(job)
# Loop
while True:
    schedule.run_pending()
    time.sleep(1)
| [
"leobraginski@gmail.com"
] | leobraginski@gmail.com |
3559b52d2c097d90ee4e42b0914bc1c2f4394ae1 | 0b1079e405bad2950a8799c30a46fc6984b922bf | /venv/Scripts/django-admin.py | 5596281abc86563d7351294fdfe04acdbc24d41c | [] | no_license | dpridoy/employee_test_api | ddc3786aecd9a3efd8f1d09c2749f237a2c31dee | 7b5da0f3bd6cd25500234952a701f34fc80c0a70 | refs/heads/main | 2023-03-07T14:38:50.817090 | 2021-02-09T09:25:50 | 2021-02-09T09:25:50 | 337,337,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | #!c:\users\dma-ridoy\documents\django projects\api_test\venv\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
# Generated virtualenv copy of Django's deprecated django-admin.py shim.
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django >= 4.0 removed both the warning class and this script.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Warn, then defer to the regular management entry point.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"dpridoy@gmail.com"
] | dpridoy@gmail.com |
b7201790e7f7e245810a4e7196093347bcf5631a | fed4c4352fba76fe5bbf1ebb7c3f2a845f864794 | /src/allStat.py | d78ca9d2f9d2f81468f8995929672180ee79f0c3 | [
"MIT"
] | permissive | theandyb/cleanCollegeBBallData | 1e9011fad30bb2c029c37d0c15235c600d5edf1a | 13ca29055cf84894c22d8b128436507550f11358 | refs/heads/master | 2020-05-17T06:03:10.526095 | 2015-03-08T16:12:09 | 2015-03-08T16:12:09 | 31,838,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | import sys,getopt
# Split ./raw_data/all_stat2.csv into numbered section files.
# -r / --recursive switches the splitter into "reclassifying" mode.
try:
    opts, args = getopt.getopt(sys.argv[1:], "r", ["recursive"])
except getopt.GetoptError:
    # Fix: print as a function (the original 'print "Lulz"' is Python-2-only
    # syntax) and exit -- previously execution continued and crashed with a
    # NameError because 'opts' was never bound.
    print("Lulz")
    sys.exit(2)
reclass = False
for o, a in opts:
    # Fix: the long option "--recursive" was declared above but the original
    # only tested for "-r", so it silently had no effect.
    if o in ("-r", "--recursive"):
        reclass = True
fName = "./raw_data/all_stat2.csv"
num = 1
outFile = "./raw_data/" + str(num) + ".csv"
out = open(outFile, 'w')
with open(fName, "r") as f:
    for line in f:
        # In reclass mode, "Reclassifying" lines are dropped entirely and
        # "NCAA" lines mark section boundaries; otherwise the "Reclassifying"
        # lines themselves mark the boundaries.
        if reclass and "Reclassifying" in line:
            continue
        boundary = "NCAA" if reclass else "Reclassifying"
        if boundary in line:
            # Start the next numbered output file.
            out.close()
            num += 1
            outFile = "./raw_data/" + str(num) + ".csv"
            out = open(outFile, 'w')
        else:
            out.write(line)
out.close()
print("Done!")
| [
"beckandy@umich.edu"
] | beckandy@umich.edu |
9cb991b352cb52bf657354c37ce5000bcdddc967 | 5072235c3798e182705017c684da42897f5710d4 | /tools/general_tools.py | bbb6a5ff74afc17afa0e1e7d16e18625001f680a | [
"Apache-2.0"
] | permissive | gafzan/AlgorithmicTradingStrategies-Decommissioned- | a4522f68db64f37fcc24ac5d74d824cf45345d87 | 80e7f97bf75b22de52130de5ad247342488b27f4 | refs/heads/master | 2023-08-14T23:29:26.065878 | 2021-10-17T09:18:01 | 2021-10-17T09:18:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,917 | py | """
general_tools.py
"""
from itertools import zip_longest
from operator import itemgetter
from datetime import date
# ______________________________________________________________________________________________________________________
# Handling lists and dictionaries
def _grouper(iterable, n, fill_value=None):
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fill_value)
def list_grouper(iterable, n, fill_value=None):
    """Group *iterable* into lists of at most n elements; returns [[]] for
    empty input.

    NOTE(review): the last group always has its None entries stripped. That
    removes the default padding, but it also (a) keeps the padding when a
    non-None *fill_value* is supplied and (b) drops genuine None elements
    from the last group -- confirm both behaviours are intended.
    """
    g = list(_grouper(iterable, n, fill_value))
    try:
        # Drop the (None) padding from the final group.
        g[-1] = [e for e in g[-1] if e is not None]
        return [list(tup) for tup in g]
    except IndexError:
        # Empty input produces no groups at all.
        return [[]]
def extend_dict(dict_1: dict, dict_2: dict) -> dict:
    """Return a new dict combining *dict_1* and *dict_2*; on key clashes the
    value from *dict_2* wins."""
    merged = dict(dict_1)
    merged.update(dict_2)
    return merged
def reverse_dict(dict_: dict) -> dict:
    """Return a new dictionary with keys and values swapped.

    old dictionary: {keys: values} -> new dictionary: {values: keys}
    """
    return dict((value, key) for key, value in dict_.items())
def check_required_keys(required_keys: list, dictionary: dict):
    """Raise ValueError if any element of *required_keys* is missing as a key
    of *dictionary*; otherwise return None.

    :param required_keys: list
    :param dictionary: dict
    :return: None
    """
    missing = set(required_keys).difference(dictionary.keys())
    if missing:
        raise ValueError("'%s' are not specified" % "', '".join(missing))
    return
def get_values_from_key_list(dictionary: dict, key_list: list):
    """Return the values of *dictionary* for each key in *key_list*, in order.

    Bug fix: the original called itemgetter(key_list)(*dictionary), which
    unpacks the dict's keys as positional arguments and uses the whole list
    as a single (unhashable) key -- it raised TypeError for every input.

    :param dictionary: dict
    :param key_list: list of keys; each must be present in *dictionary*
    :return: list of the corresponding values
    """
    return [dictionary[key] for key in key_list]
def translate_value_key_dict(dictionary: dict, new_old_key_map: dict, old_new_value_per_old_key_map: dict):
    """
    Adjust the keys and values of the given dictionary according to the specified mappers
    :param dictionary: dict (mutated in place and also returned)
    :param new_old_key_map: dict {new key: old key}
    :param old_new_value_per_old_key_map: dict {old key: {old value: new value}}
    :return: dict
    """
    # find the keys where the corresponding value needs to change and
    value_adj_keys = [key for key in old_new_value_per_old_key_map.keys() if key in dictionary.keys()]
    # change each value according to the mapper
    for value_adj_key in value_adj_keys:
        dictionary.update(
            {
                # fall back to the current value when no translation exists
                value_adj_key: old_new_value_per_old_key_map[value_adj_key].get(
                    dictionary[value_adj_key],
                    dictionary[value_adj_key]
                )
            }
        )
    # change each key according to the mapper
    # NOTE(review): the key-translation step promised by the comment above and
    # by the docstring is missing -- 'new_old_key_map' is never used, so keys
    # are returned unchanged. Confirm whether renaming was intended.
    return dictionary
# ______________________________________________________________________________________________________________________
# Handling strings
def string_is_number(s: str) -> bool:
    """Return True when *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def capital_letter_no_blanks(input_: {str, list}) -> {str, list}:
    """Upper-case *input_* and replace blanks with '_'.

    Strings are converted directly; lists are processed element-wise
    (recursively), leaving non-string elements untouched. Any other type
    raises TypeError.
    """
    if isinstance(input_, str):
        return input_.replace(' ', '_').upper()
    if isinstance(input_, list):
        return [capital_letter_no_blanks(e) if isinstance(e, str) else e
                for e in input_]
    raise TypeError("Can't change to capital letters and remove blanks for an object of type {}."
                    .format(type(input_)))
def capital_letter_no_blanks_list(variable_list: list) -> list:
    """Return a copy of *variable_list* with every string element upper-cased
    and its blanks replaced by '_'; non-string elements are kept unchanged."""
    return [capital_letter_no_blanks(e) if isinstance(e, str) else e
            for e in variable_list]
def progression_bar(counter: int, goal: int) -> None:
    """Print the progress bar produced by progression_bar_str."""
    print(progression_bar_str(counter, goal))
def progression_bar_str(counter: int, goal: int) -> str:
    """Return a textual progress bar like '[*****     ] 50.0% (5/10)'.

    Raises ValueError when counter > goal, counter < 0 or goal <= 0.
    """
    if counter > goal:
        raise ValueError("'counter' needs to be smaller or equal to 'goal'")
    if counter < 0 or goal <= 0:
        raise ValueError("'counter' can't be negative and 'goal' needs to be larger than zero.")
    percentage = str(round(100 * counter / goal, 2)) + '%'
    # 100-character wide bar: filled stars followed by blank padding.
    step_width = 100 / goal
    filled = int(counter * step_width) * '*'
    padding = int((goal - counter) * step_width) * ' '
    return '[{}{}] {} ({}/{})'.format(filled, padding, percentage, counter, goal)
def user_picks_element_from_list(list_: list):
    """Assumes that list_ is a list. Script will print a list of all the elements and then ask user to pick one.
    Returns the chosen element."""
    if len(list_) == 0:
        raise ValueError('List is empty.')
    # Show a 1-based menu of the available elements.
    for i in range(len(list_)):
        print('{}: {}'.format(i + 1, list_[i]))
    ask_user = True
    while ask_user:
        try:
            list_index = int(input('Enter a number between 1 and {}:'.format(len(list_))))
            assert 1 <= list_index <= len(list_)
            ask_user = False
        except (ValueError, AssertionError):
            # Invalid or out-of-range input: silently re-prompt.
            pass
    return list_[list_index - 1]
def ask_user_yes_or_no(question: str) -> bool:
    """Ask *question* until the user answers with a recognised yes/no variant.

    :param question: str shown to the user
    :return: True for any accepted "yes", False for any accepted "no"
    """
    yes_variants = ['sure', 'yeah', 'yes', 'y']
    no_variants = ['no', 'n', 'nope']
    while True:
        answer = input(question + '\nYes or No?: ').lower()
        if answer in yes_variants:
            return True
        if answer in no_variants:
            return False
        # Unrecognised answer: explain and re-prompt.
        print("'{}' is not an acceptable answer...\n".format(answer))
def time_period_logger_msg(start_date: {date, None}, end_date: {date, None}):
    """
    Returns a string describing the time period we are looking at, for use in
    logger messages, e.g. " from 2020-01-01 up to 2020-06-30".

    Bug fix: the original chained two conditional expressions without
    parentheses, so whenever start_date was None the end date was dropped,
    and whenever both dates were given only the "up to" part survived.

    :param start_date: date, None
    :param end_date: date, None
    :return: str (empty when both dates are None)
    """
    msg = ''
    if start_date is not None:
        msg += ' from {}'.format(start_date)
    if end_date is not None:
        msg += ' up to {}'.format(end_date)
    return msg
| [
"gafzan@gmail.com"
] | gafzan@gmail.com |
0daa335585b3be2faeb308e446ff223bc54d6ac7 | e41b8e6aa152ce0650f7cdafe044b465a4466ad2 | /chap03 decision tree/decision tree.py | 1094ebbf532af4205b6dde685fa5e6ed71f517cc | [
"MIT"
] | permissive | apachesep/code-of-machine-learning | c73040b24cd75149af769832b620a5a24642b026 | 843a1a23c801b97bba3a818e5adb143d869c8286 | refs/heads/master | 2022-11-22T06:48:51.117693 | 2020-07-24T07:52:41 | 2020-07-24T07:52:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,604 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author: Zhiqiang Ho
@contact: 18279406017@163.com
@file: decision tree.py
@time: 7/23/20 8:38 AM
@desc:
'''
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn import tree
import pandas as pd
sns.set_style("white")
def get_data():
    """Fetch the California-housing dataset and split it into train/test sets.

    Only latitude/longitude are used as features; the median house value is
    the regression target.

    :return: (x_train, x_text, y_train, y_test) — 90%/10% split with a fixed
        random seed for reproducibility.  NOTE: downloads the dataset on
        first use.
    """
    housing = fetch_california_housing()
    df = pd.DataFrame(data=housing.data, columns=housing.feature_names)
    df_y = pd.DataFrame(data=housing.target, columns=housing.target_names)
    # Append the target as an extra column named after its own header.
    df[str(df_y.columns.values[0])] = df_y
    # BUG FIX: DataFrame.as_matrix() was deprecated and then removed from
    # pandas; to_numpy() is the supported replacement with the same result.
    x = df[["Latitude", "Longitude"]].to_numpy()
    y = df[["MedHouseVal"]].to_numpy()
    x_train, x_text, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=0)
    return x_train, x_text, y_train, y_test
def write_tree_model(model, filename):
    """Render a fitted decision tree to *filename* as a PNG image.

    Requires the graphviz system package (``sudo apt-get install graphviz``)
    and the pydotplus Python package.

    :param model: fitted sklearn decision-tree estimator
    :param filename: output PNG path
    """
    import pydotplus
    dot_source = tree.export_graphviz(
        decision_tree=model,
        out_file=None,
        feature_names=["Latitude", "Longitude"],
        filled=True,
        impurity=False,
        rounded=True,
    )
    graph = pydotplus.graph_from_dot_data(dot_source)
    graph.write_png(filename)
def main(is_write_grid=False):
    """Train a depth-2 decision-tree regressor and report its test score.

    :param is_write_grid: when True, also render the fitted tree to
        'tree_model.png'
    """
    x_train, x_text, y_train, y_test = get_data()
    # Hyper-parameters (e.g. max_depth) could be tuned with a grid search.
    regressor = tree.DecisionTreeRegressor(max_depth=2)
    regressor.fit(X=x_train, y=y_train)
    test_score = regressor.score(x_text, y_test)
    print("score is {}".format(test_score))
    if is_write_grid:
        write_tree_model(regressor, "tree_model.png")


if __name__ == '__main__':
    main()
| [
"18279406017@163.com"
] | 18279406017@163.com |
346d811811941e402f2c375d0f49101f32158661 | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res/scripts/client/gui/scaleform/daapi/view/lobby/customization/camouflageinterface.py | 6ebe0013b393fb58ca391ed6ffa5081abde07dd3 | [] | no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 16,854 | py | # 2016.02.14 12:39:07 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/customization/CamouflageInterface.py
import BigWorld
import functools
from datetime import timedelta
from math import ceil
import time
from CurrentVehicle import g_currentVehicle
from constants import IGR_TYPE
from debug_utils import LOG_DEBUG
from gui import SystemMessages, g_tankActiveCamouflage
import gui
from gui.Scaleform.daapi.view.lobby.customization.BaseTimedCustomizationInterface import BaseTimedCustomizationInterface
from gui.Scaleform.daapi.view.lobby.customization.data_providers import CamouflageGroupsDataProvider, CamouflagesDataProvider, CamouflageRentalPackageDataProvider
from gui.Scaleform.daapi.view.lobby.customization import CustomizationHelper
from gui.Scaleform.genConsts.CUSTOMIZATION_ITEM_TYPE import CUSTOMIZATION_ITEM_TYPE
from gui.Scaleform.locale.MENU import MENU
from gui.Scaleform.locale.SYSTEM_MESSAGES import SYSTEM_MESSAGES
from gui.shared import g_itemsCache
from gui.shared.utils.HangarSpace import g_hangarSpace
from helpers import i18n, time_utils
from items import vehicles
from items.vehicles import CAMOUFLAGE_KINDS
class CamouflageInterface(BaseTimedCustomizationInterface):
    """Lobby-UI controller for vehicle camouflage customization.

    Tracks the camouflage applied per kind (the indices of
    items.vehicles.CAMOUFLAGE_KINDS), feeds the Scaleform data providers,
    and talks to the server inventory to buy, change or drop camouflages.

    NOTE(review): decompiled Python-2 game-client code; indentation was
    reconstructed from the decompiler output — verify control-flow nesting
    against the original bytecode where behavior matters.
    """

    def __init__(self, name, nationId, type, position):
        super(CamouflageInterface, self).__init__(name, nationId, type, position)
        # kind index -> {'id', 'lifeCycle', 'newItemID', 'packageIdx'} state dict
        self.currentItemsByKind = {}
        # kind index -> kind name (reverse mapping of CAMOUFLAGE_KINDS)
        self.indexToKind = {}
        self.resetCurrentItems()

    def resetCurrentItems(self):
        # Reset every kind's state to empty and rebuild the index->kind map.
        for k, v in CAMOUFLAGE_KINDS.iteritems():
            self.setCurrentItem(v, None, None, None, None)
            self.indexToKind[v] = k
        return

    def setCurrentItem(self, kindIdx, ID, lifeCycle, newItemID, packageIdx):
        # lifeCycle is a (startTime, days) tuple or None; newItemID is the
        # not-yet-purchased selection; packageIdx indexes the rental package.
        self.currentItemsByKind[kindIdx] = {'id': ID,
         'lifeCycle': lifeCycle,
         'newItemID': newItemID,
         'packageIdx': packageIdx}

    def __del__(self):
        LOG_DEBUG('CamouflageInterface deleted')

    def getRentalPackagesDP(self):
        # Bind the rental-package data provider to its Flash counterpart.
        dp = CamouflageRentalPackageDataProvider(self._nationID)
        dp.setFlashObject(self.flashObject.camouflageRentalPackageDP)
        return dp

    def getGroupsDP(self):
        # Bind the camouflage-groups data provider to its Flash counterpart.
        dp = CamouflageGroupsDataProvider(self._nationID)
        dp.setFlashObject(self.flashObject.camouflageGroupsDataProvider)
        return dp

    def getItemsDP(self):
        # Bind the camouflage-items data provider to its Flash counterpart.
        dp = CamouflagesDataProvider(self._nationID)
        dp.setFlashObject(self.flashObject.camouflageDP)
        return dp

    def getItemPriceFactor(self, vehType):
        return g_itemsCache.items.shop.getVehCamouflagePriceFactor(vehType.compactDescr)

    def isNewItemIGR(self):
        # True if the first pending (not yet purchased) selection is an IGR item.
        for kind, item in self.currentItemsByKind.iteritems():
            if item.get('newItemID') is not None:
                return self._itemsDP.isIGRItem(item.get('newItemID'))
        return False

    def getItemDefaultPriceFactor(self, vehType):
        return g_itemsCache.items.shop.defaults.getVehCamouflagePriceFactor(vehType.compactDescr)

    def refreshViewData(self, vehType, refresh = False):
        # Rebuild group/item lists for the vehicle; kind comes from the
        # currently selected group in the items data provider.
        if vehType is not None:
            self._groupsDP.buildList()
            self._itemsDP.setVehicleTypeParams(self.getItemPriceFactor(vehType), self.getItemDefaultPriceFactor(vehType), self.currentItemsByKind.get(CAMOUFLAGE_KINDS.get(self._itemsDP.currentGroup, 0), {'id': None}).get('id'))
            self._rentalPackageDP.refreshList()
        return

    def invalidateViewData(self, vehType, refresh = False):
        # Like refreshViewData, but always uses kind 0 and re-requests the
        # rental packages from scratch.
        if vehType is not None:
            self._groupsDP.buildList()
            self._itemsDP.setVehicleTypeParams(self.getItemPriceFactor(vehType), self.getItemDefaultPriceFactor(vehType), self.currentItemsByKind.get(0, {'id': None}).get('id'))
            self._rentalPackageDP.getRentalPackages(refresh)
        return

    def isNewItemSelected(self):
        return self.getSelectedItemsCount() > 0

    def getNewItems(self):
        # Collect the pending (unpurchased) selections as view items, or
        # None when nothing is selected.
        newItems = None
        for kind, item in self.currentItemsByKind.iteritems():
            if item.get('newItemID') is not None:
                if newItems is None:
                    newItems = []
                newItems.append(self._itemsDP.makeItem(item.get('newItemID'), False, None, None, kind))
        return newItems

    def getSelectedItemCost(self):
        # Cost dicts for every pending selection.
        newItemsCosts = [ self.getItemCost(item.get('newItemID'), item.get('packageIdx')) for kind, item in self.currentItemsByKind.iteritems() if item.get('newItemID') is not None ]
        return newItemsCosts

    def getSelectedItemsCount(self, *args):
        # With one positional arg, count only selections whose 'isGold' flag
        # equals args[0]; otherwise count all pending selections.
        if len(args):
            newItems = []
            for kind, item in self.currentItemsByKind.iteritems():
                if item.get('newItemID') is not None:
                    cost = self.getItemCost(item.get('newItemID'), item.get('packageIdx'))
                    if cost.get('isGold') == args[0]:
                        newItems.append(item)
        else:
            newItems = [ item for kind, item in self.currentItemsByKind.iteritems() if item.get('newItemID') is not None ]
        return len(newItems)

    def isCurrentItemRemove(self):
        # True if applying the pending selections would remove (not store)
        # at least one currently rented camouflage (positive rental days).
        currentItems = []
        for kind, item in self.currentItemsByKind.iteritems():
            if item.get('id') is not None and item.get('newItemID') is not None and item.get('lifeCycle', (0, 0))[1] > 0:
                currentItems.append(item)
        return len(currentItems) > 0

    def getCurrentItemRemoveStr(self):
        # HTML strings describing whether each replaced camouflage will be
        # removed (rented, days > 0) or stored (permanent, days == 0).
        removeStr = None
        for kind, item in self.currentItemsByKind.iteritems():
            lifeCycle = item.get('lifeCycle')
            if item.get('id') is not None and item.get('newItemID') and lifeCycle is not None:
                if removeStr is None:
                    removeStr = []
                if lifeCycle[1] > 0:
                    removeStr.append(gui.makeHtmlString('html_templates:lobby/customization', 'remove-camouflage-{0}'.format(kind)))
                else:
                    removeStr.append(gui.makeHtmlString('html_templates:lobby/customization', 'store-camouflage-{0}'.format(kind)))
        return removeStr

    def getCurrentItem(self):
        # Move the hangar camera to the preview position and return view
        # items for the currently applied camouflages of every kind.
        space = g_hangarSpace.space
        if space is not None:
            space.locateCameraToPreview()
        items = []
        for key, item in self.currentItemsByKind.iteritems():
            items.append(self._itemsDP.makeItem(item.get('id'), True, item.get('lifeCycle'), self._makeTimeLeftString(item=item), key))
        return items

    def onSetID(self, itemID, kind, packageIdx):
        # UI callback: itemID == -1 clears the pending selection; selecting
        # the already-applied item also clears it (toggle behavior).
        item = self.currentItemsByKind.get(kind)
        if itemID == -1:
            item['newItemID'] = None
        else:
            if item.get('id') == itemID:
                item['newItemID'] = None
            else:
                item['newItemID'] = itemID
                item['packageIdx'] = packageIdx
        # Refresh the 3D preview with the chosen camouflage.
        self.updateVehicleCustomization(itemID)
        return

    def _onRentalPackagesDataInited(self, selectedPackage, refresh):
        # Called once the rental-package data provider has its data; pushes
        # the default cost into the items provider and optionally clears all
        # pending selections before notifying listeners.
        if selectedPackage:
            self._itemsDP.setDefaultCost(selectedPackage.get('cost'), selectedPackage.get('defCost'), selectedPackage.get('isGold'), selectedPackage.get('isIGR'), selectedPackage.get('periodDays'))
        if refresh:
            for kind, item in self.currentItemsByKind.iteritems():
                item['newItemID'] = None
            self._rentalPackageDP.refresh()
            self._itemsDP.refresh()
        LOG_DEBUG('CamouflageInterface data inited', self._name)
        self.onDataInited(self._name)
        return

    def _makeTimeLeftString(self, **kwargs):
        # Human-readable remaining rental time for kwargs['item'], or '' for
        # permanent camouflages (days == 0) or items with no life cycle.
        result = ''
        item = kwargs.get('item')
        if item.get('lifeCycle') is not None:
            startTime, days = item.get('lifeCycle')
            if days > 0:
                # 86400 seconds per rental day.
                timeLeft = startTime + days * 86400 - time.time()
                if timeLeft > 0:
                    delta = timedelta(0, timeLeft)
                    if delta.days > 0:
                        # Round partial days up.
                        result = i18n.makeString(MENU.CUSTOMIZATION_LABELS_CAMOUFLAGE_TIMELEFT_DAYS, delta.days + 1 if delta.seconds > 0 else delta.days)
                    else:
                        result = i18n.makeString(MENU.CUSTOMIZATION_LABELS_CAMOUFLAGE_TIMELEFT_HOURS, ceil(delta.seconds / 3600.0))
                else:
                    result = i18n.makeString(MENU.CUSTOMIZATION_LABELS_TIMELEFT_LASTMINUTE)
        return result

    def updateVehicleCustomization(self, itemID = None):
        # Apply (or clear, when itemID is None) the camouflage on the 3D
        # hangar model.
        space = g_hangarSpace.space
        if space is not None and g_currentVehicle.isInHangar():
            space.updateVehicleCamouflage(camouflageID=itemID)
        return

    def fetchCurrentItem(self, vehDescr):
        # Populate currentItemsByKind from the vehicle descriptor's
        # (itemId, startTime, days) camouflage tuples.
        if vehDescr is not None:
            camouflages = vehDescr.camouflages
            if camouflages is not None:
                for camouflage in camouflages:
                    itemId, startTime, days = camouflage
                    if itemId is not None:
                        lifeCycle = None if itemId is None else (time_utils.makeLocalServerTime(startTime), days)
                        camouflageObject = self._itemsDP.getCamouflageDescr(itemId)
                        self.setCurrentItem(camouflageObject.get('kind'), itemId, lifeCycle, None, self._rentalPackageDP.getIndexByDays(days, self._itemsDP.isIGRItem(itemId)))
        return

    def change(self, vehInvID, section, isAlreadyPurchased):
        """Purchase/apply every pending camouflage selection on the vehicle.

        Validates that a rental package is selected and that each item has a
        known cost, then issues one server inventory request per kind.
        """
        if self._rentalPackageDP.selectedPackage is None:
            message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_DAYS_NOT_SELECTED)
            self.onCustomizationChangeFailed(message)
            return
        else:
            isNewItemFound = False
            for kind, item in self.currentItemsByKind.iteritems():
                newItemID = item.get('newItemID', None)
                currItemId = item.get('id', None)
                if newItemID is None:
                    continue
                elif not isNewItemFound:
                    isNewItemFound = True
                price = self.getItemCost(newItemID, item.get('packageIdx'))
                cost = price.get('cost', 0)
                isGold = price.get('isGold', False)
                if cost < 0:
                    message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_COST_NOT_FOUND)
                    self.onCustomizationChangeFailed(message)
                    return
                localKind = kind
                # Items already owned in the hangar are applied permanently
                # (0 days) or for a fixed 7 days; otherwise the rental
                # package's period is used.
                if CustomizationHelper.isItemInHangar(CUSTOMIZATION_ITEM_TYPE.CAMOUFLAGE, newItemID, self._nationID):
                    hangarItem = CustomizationHelper.getItemFromHangar(CUSTOMIZATION_ITEM_TYPE.CAMOUFLAGE_TYPE, newItemID, self._nationID)
                    daysToWear = 0 if hangarItem.get('isPermanent') else 7
                else:
                    daysToWear = self._rentalPackageDP.pyRequestItemAt(item.get('packageIdx')).get('periodDays')
                newIdToSend = 0
                isNewInDefaultSetup = False
                isCurrIgr = self._itemsDP.isIGRItem(currItemId)
                if isCurrIgr:
                    isNewInDefaultSetup = CustomizationHelper.isIdInDefaultSetup(CUSTOMIZATION_ITEM_TYPE.CAMOUFLAGE, newItemID)
                # Send 0 (drop) when an IGR camouflage is being replaced by
                # its own permanent default; otherwise send the new item id.
                if currItemId is None or not isCurrIgr or isCurrIgr and not isNewInDefaultSetup or isCurrIgr and isNewInDefaultSetup and daysToWear > 0:
                    newIdToSend = newItemID
                BigWorld.player().inventory.changeVehicleCamouflage(vehInvID, localKind, newIdToSend, daysToWear, functools.partial(self.__onChangeVehicleCamouflage, (cost, isGold), localKind))
            if not isNewItemFound:
                message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_NOT_SELECTED)
                self.onCustomizationChangeFailed(message)
            return

    def drop(self, vehInvID, kind):
        """Ask the server to remove the camouflage of *kind* from the vehicle."""
        if self.currentItemsByKind.get(kind) is None:
            message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_NOT_FOUND_TO_DROP)
            self.onCustomizationDropFailed(message)
            return
        else:
            BigWorld.player().inventory.changeVehicleCamouflage(vehInvID, kind, 0, 0, lambda resultID: self.__onDropVehicleCamouflage(resultID, kind))
            return

    def update(self, vehicleDescr):
        # Sync the cached per-kind state with the (possibly changed) vehicle
        # descriptor; fires onCurrentItemChange when anything differs.
        camouflages = vehicleDescr.camouflages
        isUpdated = False
        for index, camouflage in enumerate(camouflages):
            camouflageID = camouflage[0] if camouflage is not None else None
            item = self.currentItemsByKind[index]
            if camouflageID != item.get('id'):
                isUpdated = True
                item['id'] = camouflageID
                if camouflage is not None:
                    _, startTime, days = camouflage
                    startTime = time_utils.makeLocalServerTime(startTime)
                    item['lifeCycle'] = (startTime, days)
                else:
                    item['lifeCycle'] = None
            if CAMOUFLAGE_KINDS.get(self._itemsDP.currentGroup) == index:
                self._itemsDP.currentItemID = item['id']
        if isUpdated:
            self.onCurrentItemChange(self._name)
        return

    def _populate(self):
        super(CamouflageInterface, self)._populate()

    def _dispose(self):
        # Clear the hangar preview and cached state before teardown.
        self.updateVehicleCustomization()
        self.resetCurrentItems()
        super(CamouflageInterface, self)._dispose()

    def __onChangeVehicleCamouflage(self, price, kind, resultID):
        # Server callback after a change request; promotes the pending item
        # to "current" and shows a purchase system message on success.
        if resultID < 0:
            message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_CHANGE_SERVER_ERROR)
            self.onCustomizationChangeFailed(message)
            return
        else:
            item = self.currentItemsByKind.get(kind)
            g_tankActiveCamouflage[g_currentVehicle.item.intCD] = kind
            item['id'] = item.get('newItemID')
            item['lifeCycle'] = None
            item['newItemID'] = None
            if CAMOUFLAGE_KINDS.get(self._itemsDP.currentGroup) == kind:
                self._itemsDP.currentItemID = item['id']
            cost, isGold = price
            if cost == 0:
                key = SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_CHANGE_SUCCESS_FREE
                typeValue = SystemMessages.SM_TYPE.Information
                str = i18n.makeString(key)
            else:
                if isGold:
                    key = SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_CHANGE_SUCCESS_GOLD
                    fCost = BigWorld.wg_getGoldFormat(cost)
                    typeValue = SystemMessages.SM_TYPE.CustomizationForGold
                else:
                    key = SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_CHANGE_SUCCESS_CREDITS
                    fCost = BigWorld.wg_getIntegralFormat(cost)
                    typeValue = SystemMessages.SM_TYPE.CustomizationForCredits
                str = i18n.makeString(key, fCost)
            self.onCustomizationChangeSuccess(str, typeValue)
            return

    def __onDropVehicleCamouflage(self, resultID, kind):
        # Server callback after a drop request; distinguishes "stored back to
        # hangar" from "removed", and restores the IGR default camouflage
        # when an IGR room is active.
        if resultID < 0:
            message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_DROP_SERVER_ERROR)
            self.onCustomizationDropFailed(message)
            return
        else:
            item = self.currentItemsByKind.get(kind)
            hangarItem = CustomizationHelper.getItemFromHangar(CUSTOMIZATION_ITEM_TYPE.CAMOUFLAGE_TYPE, item.get('id'), self._nationID)
            if hangarItem:
                intCD = g_currentVehicle.item.intCD
                vehicle = vehicles.getVehicleType(int(intCD))
                message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_STORED_SUCCESS, vehicle=vehicle.userString)
            else:
                message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_DROP_SUCCESS)
            if g_tankActiveCamouflage.has_key(g_currentVehicle.item.intCD):
                del g_tankActiveCamouflage[g_currentVehicle.item.intCD]
            newID = None
            newLifeCycle = None
            if gui.game_control.g_instance.igr.getRoomType() != IGR_TYPE.NONE:
                camouflages = g_currentVehicle.item.descriptor.camouflages
                camo = camouflages[kind]
                if camo[0] is not None:
                    newID = camo[0]
                    newLifeCycle = (camo[1], camo[2])
            item['id'] = newID
            item['lifeCycle'] = newLifeCycle
            if CAMOUFLAGE_KINDS.get(self._itemsDP.currentGroup) == kind:
                self._itemsDP.currentItemID = newID
            self.onCustomizationDropSuccess(message)
            return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\customization\camouflageinterface.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:39:08 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
bd34cba713330f49ee188c81630715aa396f5f11 | 13c3028d52bdb7a647921b52367a9c804e07cc25 | /epyseg/gui/preview.py | fcb5b0ef61019349c9a1db85bc7000fb0a38a4e4 | [
"Apache-2.0",
"MPL-2.0",
"HPND",
"BSD-3-Clause",
"GPL-3.0-only"
] | permissive | baigouy/EPySeg | 8b1a062f5e88405ca07bd33c6427686182bdae9d | 5ce46ce981c7607c74d9a8f82ef942b207bdb210 | refs/heads/master | 2022-07-26T10:30:41.056239 | 2022-05-24T13:03:50 | 2022-05-24T13:03:50 | 272,421,806 | 19 | 5 | BSD-3-Clause | 2022-05-23T08:27:41 | 2020-06-15T11:34:48 | Python | UTF-8 | Python | false | false | 7,311 | py | from PyQt5.QtCore import QRect, Qt, QRectF
from PyQt5.QtWidgets import QWidget, QApplication, QGridLayout, QScrollArea
from epyseg.draw.shapes.square2d import Square2D
from epyseg.draw.widgets.paint import Createpaintwidget
from epyseg.img import Img
from epyseg.draw.shapes.rect2d import Rect2D
import sys
# in fact that is maybe already what I want!!!
# but I may also want to draw on it with a pen --> should have everything
class crop_or_preview(QWidget):
    """Small fixed-size image widget used either as a preview or as a
    rectangle/square crop selector.

    In crop mode the user drags a Rect2D/Square2D on the image; the selected
    region is exposed via get_crop_parameters().

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source — verify nesting of the event handlers against the original.
    """

    def __init__(self, parent_window=None, preview_only=False):
        super().__init__(parent=parent_window)
        self.scale = 1.0
        # Crop-rectangle corners in image coordinates; None until a ROI exists.
        self.x1 = self.x2 = self.y1 = self.y2 = None
        self.preview_only = preview_only
        self.initUI()

    def initUI(self):
        """Build the paint widget inside a scroll area and wire mouse events."""
        layout = QGridLayout()
        layout.setSpacing(0)
        layout.setContentsMargins(0, 0, 0, 0)
        self.paint = Createpaintwidget()
        self.paint.vdp.active = True
        self.paint.vdp.drawing_mode = True
        # In crop mode the user draws rectangles; preview mode draws nothing.
        if not self.preview_only:
            self.paint.vdp.shape_to_draw = Rect2D
        self.scrollArea = QScrollArea()
        self.scrollArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.scrollArea.setWidget(self.paint)
        self.paint.scrollArea = self.scrollArea
        self.setMouseTracking(not self.preview_only)
        self.paint.setMouseTracking(not self.preview_only)  # KEEP IMPORTANT
        # Redirect the paint widget's mouse events to this widget's handlers.
        self.paint.mouseMoveEvent = self.mouseMoveEvent
        self.paint.mousePressEvent = self.mousePressEvent
        self.paint.mouseReleaseEvent = self.mouseReleaseEvent
        # Fixed 192x192 preview area.
        self.prev_width = 192
        self.prev_height = 192
        self.scrollArea.setGeometry(QRect(0, 0, self.prev_width, self.prev_height))
        self.setGeometry(QRect(0, 0, self.prev_width, self.prev_height))
        self.setFixedSize(self.size())
        layout.addWidget(self.scrollArea)
        self.setLayout(layout)

    def set_image(self, img):
        """Show *img*, clearing any existing ROI and fitting it to the preview."""
        self.paint.vdp.shapes.clear()
        self.paint.setImage(img)  # NOTE(review): original carried a 'bug is here' marker — confirm setImage handles None
        if img is None:
            self.paint.scale = self.scale = self.paint.vdp.scale = 1.
        else:
            # Scale so the whole image fits inside the preview rectangle.
            max_size = min(self.prev_width / img.get_width(), self.prev_height / img.get_height())
            self.paint.scale = self.scale = self.paint.vdp.scale = max_size
        if self.paint.image is not None:
            self.paint.resize(self.scale * self.paint.image.size())
            self.scrollArea.resize(self.scale * self.paint.image.size())

    def mousePressEvent(self, event):
        # Starting a new drag discards the previous ROI.
        self.paint.vdp.shapes.clear()
        if self.paint.vdp.active:
            self.paint.vdp.mousePressEvent(event)
            if self.paint.vdp.currently_drawn_shape is not None:
                # Keep the stroke visually constant regardless of zoom.
                self.paint.vdp.currently_drawn_shape.stroke = 3 / self.scale
        self.update()

    def mouseMoveEvent(self, event):
        self.paint.vdp.mouseMoveEvent(event)
        # Repaint only the visible part of the scrolled paint widget.
        region = self.scrollArea.widget().visibleRegion()
        self.paint.update(region)

    def mouseReleaseEvent(self, event):
        if self.paint.vdp.active:
            try:
                self.paint.vdp.mouseReleaseEvent(event)
                self.update()  # required to update drawing
                self.update_ROI()
            except:
                # NOTE(review): broad except silently swallows handler errors.
                pass

    def set_square_ROI(self, bool):
        """Switch between square (True) and free rectangle (False) selection."""
        if bool:
            self.paint.vdp.shape_to_draw = Square2D
        else:
            self.paint.vdp.shape_to_draw = Rect2D

    def setRoi(self, x1, y1, x2, y2):
        """Set the ROI programmatically from two corner points."""
        # if x1 is None and y1 is None and x2 is None and y2 is None:
        #     self.paint.vdp.shapes.clear()
        #     self.paint.update()
        #     self.update()  # required to update drawing
        #     self.update_ROI()
        #     return
        if x1 != x2 and y1 != y2:
            # TODO add controls for size of ROI --> TODO but ok for now
            self.paint.vdp.shapes.clear()
            if x1 is not None and y1 is not None:
                rect2d = Rect2D(x1, y1, x2 - x1, y2 - y1)
                rect2d.stroke = 3 / self.scale
                self.paint.vdp.shapes.append(rect2d)
            self.paint.vdp.currently_drawn_shape = None
            self.paint.update()
            self.update()  # required to update drawing
            self.update_ROI()
        else:
            # Degenerate rectangle: just record the raw coordinates.
            self.x1 = x1
            self.x2 = x2
            self.y1 = y1
            self.y2 = y2
            self.paint.update()
            self.update()
            # self.paint.vdp.update()
            # self.paint.vdp.currently_drawn_shape.stroke = 3 / self.scale

    def update_ROI(self):
        """Read the drawn rectangle, clamp it to the image and normalize the
        corner order into (x1, y1) top-left / (x2, y2) bottom-right."""
        try:
            rect = self.paint.vdp.shapes[0]
            x1 = rect.x()
            y1 = rect.y()
            x2 = rect.x() + rect.width()
            y2 = rect.y() + rect.height()
            # Clamp negative coordinates to the image origin.
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 < 0:
                x2 = 0
            if y2 < 0:
                y2 = 0
            # Clamp to the image extent when the rectangle overshoots it.
            if rect.width() >= self.paint.image.size().width():
                x2 = self.paint.image.size().width()
            if rect.height() >= self.paint.image.size().height():
                y2 = self.paint.image.size().height()
            # Normalize corner order (drag may go right-to-left / bottom-up).
            if x1 > x2:
                tmp = x2
                x2 = x1
                x1 = tmp
            if y1 > y2:
                tmp = y2
                y2 = y1
                y1 = tmp
            # Zero-area selections are treated as "no ROI" on that axis.
            if x1 == x2:
                x1 = x2 = None
            if y1 == y2:
                y1 = y2 = None
            self.x1 = x1
            self.x2 = x2
            self.y1 = y1
            self.y2 = y2
        except:
            # No shape drawn (or no image): clear the ROI.
            self.x1 = self.x2 = self.y1 = self.y2 = None

    def get_crop_parameters(self):
        """Return the ROI as a dict, or None when nothing is selected.

        When x1/y1 are None but x2/y2 are set, the values are returned as a
        width/height pair ('w'/'h') instead of a second corner.
        """
        if self.paint.vdp.shapes:
            self.update_ROI()
        if self.x1 is None:
            if self.x2 is not None:
                return {'x1': self.x1, 'y1': self.y1, 'w': int(self.x2), 'h': int(self.y2)}
            else:
                return None
        return {'x1': int(self.x1), 'y1': int(self.y1), 'x2': int(self.x2), 'y2': int(self.y2)}
# Manual test harness: open the widget on a sample image, preset a ROI and
# print the crop parameters after the window is closed.
if __name__ == '__main__':
    # ok in fact that is already a great popup window --> can I further improve it ???
    # just for a test
    app = QApplication(sys.argv)
    ex = crop_or_preview()
    # ex = crop_or_preview(preview_only=True)
    # img = Img('/home/aigouy/mon_prog/Python/Deep_learning/unet/data/membrane/test/11.png')
    # img = Img('/home/aigouy/mon_prog/Python/Deep_learning/unet/data/membrane/test/122.png')
    # img = Img('/home/aigouy/mon_prog/Python/data/3D_bicolor_ovipo.tif')
    # img = Img('/home/aigouy/mon_prog/Python/data/Image11.lsm')
    # img = Img('/home/aigouy/mon_prog/Python/data/lion.jpeg')
    # img = Img('/home/aigouy/mon_prog/Python/data/epi_test.png')
    img = Img('/E/Sample_images/sample_images_PA/trash_test_mem/mini10_fake_swaps/focused_Series012.png')
    ex.set_image(img)
    # test = QRectF(None, None, 128, 128)
    ex.setRoi(None, None, 128, 256)
    # ex.set_image(None)
    ex.show()
    app.exec_()
    print(ex.get_crop_parameters())
| [
"baigouy@gmail.com"
] | baigouy@gmail.com |
b73d1826be68e566cc4418a478ee654d378cc0a6 | 073d40d3ea58e37d8a130794910068005f3f259d | /processing/surface_based_analysis.py | 56afba929f609a17760fcae36ccf26cd024a0541 | [
"BSD-2-Clause"
] | permissive | KamalakerDadi/public_analysis_code | bd925f442d32fbedc56e145ad0bc981d5ac3924c | b8770d485fd2697838b911120c41d91250671636 | refs/heads/master | 2020-03-20T21:10:33.759118 | 2018-07-30T18:27:10 | 2018-07-30T18:27:10 | 137,727,239 | 0 | 0 | null | 2018-06-18T08:27:58 | 2018-06-18T08:27:58 | null | UTF-8 | Python | false | false | 5,182 | py | """
This script does 2 things:
1. Freesurfer segmentation
2. project the coregistered fMRI images to the surface:
the surface is the grey-white matter interface of the subject
The purpose is to perform proper group analysis on the surface on fsaverage,
and use existing atlases on the surface.
Author: Bertrand Thirion, Isabelle Courcol, 2013 -- 2016
Note
----
First run: export SUBJECTS_DIR=''
"""
import os
import glob
import commands
from nipype.caching import Memory
from joblib import Parallel, delayed
from nipype.interfaces.freesurfer import ReconAll, BBRegister
# Root directory of the per-subject derivative data.
work_dir = '/neurospin/ibc/derivatives'
subjects = ['sub-%02d' % i for i in [1, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]]
# NOTE(review): this second assignment overwrites the full list above —
# presumably restricting the run to a subset; confirm this is intentional.
subjects = ['sub-%02d' % i for i in [8, 9, 11, 12, 13, 14]]
# nipype caching directory (used by the cached ReconAll interface).
mem = Memory(base_dir='/neurospin/tmp/ibc')

# Step 1: Perform recon-all
# Clear SUBJECTS_DIR so each recon-all call sets it explicitly via -sd.
os.environ['SUBJECTS_DIR'] = ''
def recon_all(work_dir, subject, high_res=True):
    """Run FreeSurfer's recon-all segmentation on one subject's T1 image.

    :param work_dir: root derivatives directory containing subject folders
    :param subject: subject identifier, e.g. 'sub-01'
    :param high_res: when True, use the high-resolution T1 acquisition with
        FreeSurfer's -hires pipeline and expert options; otherwise use the
        standard-resolution T1 from session ses-00
    """
    if high_res:
        # High-resolution T1: take the first matching acquisition.
        pattern = os.path.join(
            work_dir, subject, 'ses-*/anat/sub-*_ses-*_acq-highres_T1w.nii*')
        anat_img = glob.glob(pattern)[0]
        print(anat_img)
        t1_dir = os.path.dirname(anat_img)
        os.system('recon-all -all -subjid %s -sd %s -hires -i %s -expert expert.opts' % (subject, t1_dir, anat_img))
    else:
        # Standard-resolution T1 from session ses-00.
        t1_dir = os.path.join(work_dir, subject, 'ses-00', 'anat')
        anat_img = glob.glob(os.path.join(t1_dir, '%s_ses-00_T1w.nii*' % subject))[0]
        os.system('recon-all -all -subjid %s -sd %s' % (subject, t1_dir))
#Parallel(n_jobs=1)(delayed(recon_all)(work_dir, subject, True)
# for subject in subjects)
# Step 2: Perform the projection
def project_volume(work_dir, subject, sessions, do_bbr=True):
    """Project coregistered fMRI volumes onto the subject's cortical surface.

    For every session and every 'rdc*' fMRI series found, samples the volume
    on the left and right grey/white-matter interface (mri_vol2surf) and then
    resamples the resulting textures to fsaverage (mri_surf2surf, ico order 7
    with smoothing).

    :param work_dir: root derivatives directory
    :param subject: subject identifier, e.g. 'sub-01'
    :param sessions: list of session names to process
    :param do_bbr: when True, refine the coregistration with BBR first

    NOTE(review): relies on FreeSurfer binaries, nipype's BBRegister and the
    Python-2 'commands' module; assumes recon-all output under ses-00/anat.
    """
    t1_dir = os.path.join(work_dir, subject, 'ses-00', 'anat')
    for session in sessions:
        subject_dir = os.path.join(work_dir, subject, session)
        # Skip sessions that do not exist for this subject.
        if not os.path.exists(subject_dir):
            continue
        fmri_dir = os.path.join(subject_dir, 'func')
        fs_dir = os.path.join(subject_dir, 'freesurfer')
        fmri_images = glob.glob(os.path.join(fmri_dir, 'rdc*.nii.gz'))
        # --------------------------------------------------------------------
        # run the projection using freesurfer
        os.environ['SUBJECTS_DIR'] = t1_dir
        if not os.path.exists(fs_dir):
            os.mkdir(fs_dir)
        # take the fMRI series
        print("fmri_images", fmri_images)
        for fmri_session in fmri_images:
            basename = os.path.basename(fmri_session).split('.')[0]
            print (basename)
            # output names
            # the .gii files will be put in the same directory as the input fMRI
            left_fmri_tex = os.path.join(fs_dir, basename + '_lh.gii')
            right_fmri_tex = os.path.join(fs_dir, basename + '_rh.gii')
            if do_bbr:
                # use BBR registration to finesse the coregistration
                bbreg = BBRegister(subject_id=subject, source_file=fmri_session,
                                   init='header', contrast_type='t2')
                bbreg.run()
            # run the freesurfer command for the projection; the registration
            # file is the one BBRegister wrote next to the input fMRI
            regheader = os.path.join(fmri_dir, basename + '_bbreg_%s.dat' % subject)
            # sample the volume on each hemisphere's surface (average over
            # projection fractions 0..2 in steps of 0.1)
            print(commands.getoutput(
                '$FREESURFER_HOME/bin/mri_vol2surf --src %s --o %s '\
                '--out_type gii --srcreg %s --hemi lh --projfrac-avg 0 2 0.1'
                % (fmri_session, left_fmri_tex, regheader)))
            print(commands.getoutput(
                '$FREESURFER_HOME/bin/mri_vol2surf --src %s --o %s '\
                '--out_type gii --srcreg %s --hemi rh --projfrac-avg 0 2 0.1'
                % (fmri_session, right_fmri_tex, regheader)))
            # resample to fsaverage
            left_fsaverage_fmri_tex = os.path.join(
                fs_dir, basename + '_fsaverage_lh.gii')
            right_fsaverage_fmri_tex = os.path.join(
                fs_dir, basename + '_fsaverage_rh.gii')
            print(commands.getoutput(
                '$FREESURFER_HOME/bin/mri_surf2surf --srcsubject %s --srcsurfval '\
                '%s --trgsurfval %s --trgsubject ico --trgicoorder 7 '\
                '--hemi lh --nsmooth-out 5' %
                (subject, left_fmri_tex, left_fsaverage_fmri_tex)))
            print(commands.getoutput(
                '$FREESURFER_HOME/bin/mri_surf2surf --srcsubject %s --srcsurfval '\
                '%s --trgsubject ico --trgicoorder 7 --trgsurfval %s '\
                '--hemi rh --nsmooth-out 5' %
                (subject, right_fmri_tex, right_fsaverage_fmri_tex)))
# Build the (subject, session) pairs for the 'enumeration' protocol and run
# the surface projection for each pair, four jobs in parallel.
from pipeline import get_subject_session
subject_sessions = sorted(get_subject_session('enumeration'))
Parallel(n_jobs=4)(
    delayed(project_volume)(work_dir, subject_session[0], [subject_session[1]], do_bbr=True)
    for subject_session in subject_sessions)
| [
"bertrand.thirion@inria.fr"
] | bertrand.thirion@inria.fr |
9aa9799855d7fe837b31652aa8140412a8ac1779 | 911117a97349f26be8f72e3c7b84b8fce8de265a | /mysite/migrations/0002_maker_pmodel_pphoto_product1.py | 43c655b7986ca5b361ab49240feb8b34d7b8cf7b | [] | no_license | tlsvie/PMSYS | 99cd7d6fb58819699e6a9ca44e4366686ecad52c | e13fef6bde7eeef7bc292b1f09c9d5efeedf4c1c | refs/heads/master | 2020-03-24T07:33:40.582690 | 2018-08-05T12:10:21 | 2018-08-05T12:10:21 | 142,568,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,188 | py | # Generated by Django 2.1b1 on 2018-07-27 05:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Adds the Maker, PModel, PPhoto and Product1 models on top of the initial
    'mysite' migration.  Do not edit by hand except to resolve conflicts.
    """

    dependencies = [
        ('mysite', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Maker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10)),
                ('country', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='PModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('url', models.URLField(default='http://www.baidu.com')),
                # Each product model belongs to one maker.
                ('maker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mysite.Maker')),
            ],
        ),
        migrations.CreateModel(
            name='PPhoto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(default='产品图片', max_length=20)),
                ('url', models.URLField(default='http://www.baidu.com')),
                # Photo of a product defined by the pre-existing Product model.
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mysite.Product')),
            ],
        ),
        migrations.CreateModel(
            name='Product1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nickname', models.CharField(default='二手机', max_length=15)),
                ('description', models.TextField(default='暂无说明')),
                ('year', models.PositiveIntegerField(default=2018)),
                ('price', models.PositiveIntegerField(default=0)),
                # Concrete second-hand item of a given product model.
                ('pmodel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mysite.PModel')),
            ],
        ),
    ]
| [
"hirenzhao@hotmail.com"
] | hirenzhao@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.