blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73c381a2286a4c3eea64c1b22a9c5b3837ec8992 | deb6f0cb8f18c57de1bb6d31bdee293db7273796 | /DIABETES/urls.py | 2be7a8b7bae42caaeb584d2577d17639a65ff03c | [] | no_license | pratikrj1601/Health-Monitoring-using-Machine-learning | abf722fe1185560fd53e95ff2368f683a807e569 | 046b2e4863d0d64614278870be04727287e6f916 | refs/heads/master | 2023-09-02T03:00:09.308417 | 2021-11-12T12:09:15 | 2021-11-12T12:09:15 | 427,341,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.diabetes, name="diabetes"),
path('/prediction', views.predict, name="predict"),
]
| [
"noreply@github.com"
] | noreply@github.com |
6fa8dff4fc69d605b416ee01c4cf493f73020b64 | 86b4e40a0e9ad265c49e9287af5ebf6d0871987d | /Valid Parentheses/code.py | a99a498bb4d2e4fa2ac3f7b37c221da074ec8ed4 | [] | no_license | Jason-Woo/leetcode_problemset | 52abe06ae554c77213d7cad824310517b64a9fb0 | 8c22875b458dd8720e4c43caf70a0a6c7a5fdb6b | refs/heads/master | 2023-03-27T12:27:52.099188 | 2021-03-10T22:51:58 | 2021-03-10T22:51:58 | 263,157,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | class Solution(object): #Atten
def isValid(self, s):
left, right, stack = "({[", ")}]", []
for item in s:
if item in left:
stack.append(item)
else:
if not stack or left.find(stack.pop()) != right.find(item):
return False
return not stack | [
"4everjasonwoo@gmail.com"
] | 4everjasonwoo@gmail.com |
458197f8501b21000576d09954086272fbca03f4 | acd2ce470c68d3f407c9bc022a6d115fdb962713 | /coroutine/iterable.py | b71c2cf5edf996c490b82c866d92ed51d46a6d13 | [] | no_license | wwd-0/python-base | 080ff0065eebc8209b7c4079a44a2726a3c67c63 | c4f900ce7cd735a2f57ebc88cfde7ffcfb437444 | refs/heads/master | 2022-06-26T15:01:29.718456 | 2020-05-10T12:36:09 | 2020-05-10T12:36:09 | 262,716,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | #coding=utf-8
from collections import Iterable
import time
class ClassMate(object):
def __init__(self):
self.names = list()
self.current_num = 0
def add(self,name):
self.names.append(name)
def __iter__(self):
return self
def __next__(self):
if self.current_num < len(self.names):
ret = self.names[self.current_num]
self.current_num += 1
return ret
else:
raise StopIteration
if __name__ == "__main__":
classmate = ClassMate()
classmate.add("老王")
classmate.add("老张")
classmate.add("老李")
for name in classmate:
print(name)
time.sleep(1)
| [
"wwd@localhost.localdomain"
] | wwd@localhost.localdomain |
ccfd614cc9917c7b23572f95c1d1f85958ab5721 | d17d407c0e8d05a3cbc1b390e7d1d6f8a4cfe2ad | /rango/models.py | 35d46db39c80a42ee1c0b2dc36bdc55f4387e4bd | [] | no_license | sp6pe/tango_with_django_project | 75400654f8cec0772a2fd32a57f17163f1de1197 | a6cb1cd61e07bd857751bfe8d9689219059f9a76 | refs/heads/master | 2020-07-20T12:43:30.408047 | 2015-07-31T14:17:58 | 2015-07-31T14:17:58 | 39,037,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | from django.db import models
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=128, unique=True)
likes = models.IntegerField(default=0)
views = models.IntegerField(default=0)
slug = models.SlugField(unique=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Category, self).save(*args, **kwargs)
def __unicode__(self): #For Python 2, use __str__ on Python 3
return self.name
class Page(models.Model):
category = models.ForeignKey(Category)
title = models.CharField(max_length =128)
url = models.URLField()
views = models.IntegerField(default=0)
def __unicode__(self):
return self.title
class UserProfile(models.Model):
# This line is required. Links UserProfile to a User model instance.
user = models.OneToOneField(User)
website = models.URLField(blank=True)
image = models.ImageField(upload_to = 'profile_images', blank = True)
# Override the __unicode__() method to return out something meaningful!
def __unicode__(self):
return self.user.username
| [
"sp6pe@virginia.edu"
] | sp6pe@virginia.edu |
c283385eb0e04fff394a718c3a315e144b525bb9 | 35349ee872a16cc1e286c71d09496fc5e383efb5 | /svm_with_csk_svm.py | bfae4faf1bf402a13abcfef1923b6e9ca9cd705b | [] | no_license | aipjn/Dissertation_project | 3593d1e4524dee54f783f4f7eafdd4c8820a8ee1 | 1df15d24a632e092ba8146f885d27c6452f4a4a1 | refs/heads/master | 2020-03-22T22:10:53.872773 | 2018-09-10T00:06:11 | 2018-09-10T00:06:11 | 140,737,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,267 | py | """\
------------------------------------------------------------
USE: Using common sense to update SVM model
------------------------------------------------------------\
"""
import time
from utils.dataset import Dataset
from utils.utils import stemming, vocabulary
from utils.evaluation import Evaluation
from utils.bing import search
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from baseline_svm import train as svm_train
from baseline_svm import test as svm_test
from baseline_svm import similarities
from baseline_svm import q_a_similarities
from baseline_svm import extract_feature
from sklearn import svm
# prepare data
begin = time.time()
data = Dataset()
vocab = vocabulary(data.trainset)
low_pro = 0.5
low_diff = 0.05
def use_csk(data, predicty, model):
index = 0
count = 0
data_c = []
for instance in data:
for question in instance['questions']:
if (predicty[index][0] < low_pro and predicty[index + 1][0] < low_pro):
count += 1
texts = search(question['question'])
if texts == '':
continue
# print("texts finish")
ques = stemming(question['question'])
ans1 = stemming(question['answers'][0]['answer'])
ans2 = stemming(question['answers'][1]['answer'])
data_c.append([index, ques, ans1, ans2, texts])
index += 2
print("changed num", count)
# update result
for value in data_c:
key = value[0]
ans1_pros = [predicty[key][0]]
ans2_pros = [predicty[key + 1][0]]
ques = value[1]
ans1 = value[2]
ans2 = value[3]
texts = value[4]
X = []
for text in texts:
# print(len(text))
text = text[0:100000]
text = stemming(text)
if text == '' or len(text.split()) < 30:
continue
X.append(extract_feature(text, ans1) + similarities(text, ans1, ques) + q_a_similarities(ans1, ques))
X.append(extract_feature(text, ans2) + similarities(text, ans2, ques) + q_a_similarities(ans2, ques))
if len(X) == 0:
continue
result = model.predict_proba(X)
i = 0
while i < len(result):
ans1_pros.append(result[i][0])
ans2_pros.append(result[i + 1][0])
i += 2
if sorted(ans1_pros, reverse=True)[0] > sorted(ans2_pros, reverse=True)[0]:
predicty[key][0] = 1
predicty[key + 1][0] = 0
else:
predicty[key][0] = 0
predicty[key + 1][0] = 1
print("update")
return predicty
# train
model = svm.SVC(gamma=10, probability=True)
svm_train(data.trainset, model, 1470)
y, predicty = svm_test(data.testset, model)
eval1 = Evaluation()
eval1.accuracy(y, predicty, data)
with open('result_svm.txt', 'w') as f:
for index, maxd in enumerate(eval1.wrong):
f.write("Case #{}: {} ".format(index + 1, maxd) + '\n')
# predicty=[[0.1], [0.2], [0.1], [0.2], [0.1], [0.2]]
predicty = use_csk(data.testset, predicty, model)
# Evaluation
eval = Evaluation()
eval.accuracy(y, predicty, data)
final = time.time()
print("time:", final - begin) | [
"1058134934@qq.com"
] | 1058134934@qq.com |
ab312889eb22efbe7f28a5feb579980812890a7a | 025eaa18456cebaa08e95b33f6e85474665feb1a | /NSC_Script.py | 47a2093aae048545cdaeefb370f1fa8f34e659f8 | [] | no_license | Rlopezra/NSC-Student-Tracker-Reduction-Script | 3f156179bd4e652a0584b8ba4145e577ee737404 | c13d30ad9335c2961d3d0b9edc402e695a1083ab | refs/heads/master | 2021-09-05T20:38:53.205100 | 2018-01-30T21:31:16 | 2018-01-30T21:31:16 | 111,505,980 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | py | import pandas as pd
#This line loads the csv into a pandas data frame, you'll have to change it to the location of your NSC file
data = pd.read_csv("C:/Users/rjlopez/Desktop/EVC Tracker.csv")
#formatting the dates columns to proper date format, some interpreters will change these columns to a number if we don't format the columns
data['Enrollment Begin'] = pd.to_datetime(data['Enrollment Begin'], format='%Y%m%d')
data['Enrollment End'] = pd.to_datetime(data['Enrollment End'], format='%Y%m%d')
data['Graduation Date'] = pd.to_datetime(data['Graduation Date'], format='%Y%m%d')
#Grouping all the records by student ID and college
#Change all instances of 'Requester Return Field' to 'Your Unique Identifier' if student IDs are in the 'Your Unique Identifies' column
group = data.groupby(['Requester Return Field', 'College Code/Branch'], as_index=False)
#Finding the earliest and latest enrollment date at each college
groupmin = group['Enrollment Begin'].min()
groupmax= group['Enrollment End'].max()
#Renaming the columns
groupmin = groupmin.rename( columns={"Enrollment Begin": "Earliest Enrollment"})
groupmax = groupmax.rename( columns={"Enrollment End": "Latest Enrollment"})
#Joining the earliest and latest enrollment date at each institution
test = pd.merge(data, groupmin, how = 'left', on = ['Requester Return Field', 'College Code/Branch'])
test = pd.merge(test, groupmax, how = 'left', on = ['Requester Return Field', 'College Code/Branch'])
#Selecting all the graduation records and dropping duplicate columns
grads = data.loc[data['Graduated?'] =='Y']
grads = grads[['Requester Return Field', 'College Code/Branch', 'Graduated?', 'Graduation Date', 'Degree Title', 'Degree Major 1', 'Degree CIP 1']]
test = test.drop(['Enrollment Begin', 'Enrollment End', 'Graduated?', 'Graduation Date', 'Degree Title', 'Degree Major 1', 'Degree CIP 1'], axis = 1)
#Joining the graduation records to the main dataset
test = pd.merge(test, grads, how = 'left', on = ['Requester Return Field', 'College Code/Branch'])
#Dropping duplicate records
final_df = test.drop_duplicates(['Requester Return Field', 'College Code/Branch'], keep='first')
#This line exports your dataframe to a csv. Change this part to name the file and the location you want the file to be placed in.
final_df.to_csv("C:/Users/rjlopez/Desktop/Evergreen Valley Tracker File.csv")
| [
"noreply@github.com"
] | noreply@github.com |
ede138d3bc51c61545e5ac67a26e7478adae7361 | 3f21aab01358400321d14ea68513f8fd5b2075e9 | /Calculate_Electricity_Bill_Example_1.py | 51c984411b92f145dc5f4d651c4aed9efd6783e2 | [] | no_license | AAKASH707/PYTHON | c3f991f3517259ee0fc14ab68a8b8bb6c01e4b70 | cadb4f4849f4a5e99647d2173cdbb8e953ad038e | refs/heads/master | 2021-03-12T05:44:12.914266 | 2021-03-02T18:18:37 | 2021-03-02T18:18:37 | 246,594,637 | 1 | 1 | null | 2021-01-25T17:48:38 | 2020-03-11T14:33:28 | Python | UTF-8 | Python | false | false | 562 | py | # ********* Python Program to Calculate Electricity Bill *********** #
units = int(input(" Please enter Number of Units you Consumed : "))
if(units < 50):
amount = units * 2.60
surcharge = 25
elif(units <= 100):
amount = 130 + ((units - 50) * 3.25)
surcharge = 35
elif(units <= 200):
amount = 130 + 162.50 + ((units - 100) * 5.26)
surcharge = 45
else:
amount = 130 + 162.50 + 526 + ((units - 200) * 8.45)
surcharge = 75
total = amount + surcharge
print("\nElectricity Bill = %.2f" %total)
| [
"noreply@github.com"
] | noreply@github.com |
f50b218007abef8fad7e58bf1ef26cba7d9cde1b | c634c8dd46673eed049631c95527a4174a822198 | /ProductionScripts/ConfigFiles/FullSim/DIGI2_cfg.py | b2f38e155310cb34030c2716f4fd4bfec1cb4fb7 | [] | no_license | nicolastonon/EFTSimPheno | ad79bd721a519405b97db83040c6ee8e1cf63e96 | fb6c068611b3b746d72108bf76080297b68b76fd | refs/heads/master | 2023-07-25T08:37:13.170427 | 2021-09-06T16:49:28 | 2021-09-06T16:49:28 | 228,392,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,210 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step2 --filein file:DIGI1.root --fileout file:DIGI2.root --python_filename DIGI2_cfg.py --mc --eventcontent AODSIM --runUnscheduled --datatier AODSIM --conditions 94X_mc2017_realistic_v11 --step RAW2DIGI,RECO,RECOSIM,EI --nThreads 8 --era Run2_2017 --no_exec
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('RECO',eras.Run2_2017)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.RecoSim_cff')
process.load('CommonTools.ParticleFlow.EITopPAG_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:DIGI1.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step2 nevts:-1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.AODSIMoutput = cms.OutputModule("PoolOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(4),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('AODSIM'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(31457280),
fileName = cms.untracked.string('file:DIGI2.root'),
outputCommands = process.AODSIMEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '94X_mc2017_realistic_v11', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.reconstruction_step = cms.Path(process.reconstruction)
process.recosim_step = cms.Path(process.recosim)
process.eventinterpretaion_step = cms.Path(process.EIsequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.AODSIMoutput_step = cms.EndPath(process.AODSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.reconstruction_step,process.recosim_step,process.eventinterpretaion_step,process.endjob_step,process.AODSIMoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
#Setup FWK for multithreaded
# process.options.numberOfThreads=cms.untracked.uint32(1)
# process.options.numberOfStreams=cms.untracked.uint32(0)
#do not add changes to your config after this point (unless you know what you are doing)
from FWCore.ParameterSet.Utilities import convertToUnscheduled
process=convertToUnscheduled(process)
# Customisation from command line
#Have logErrorHarvester wait for the same EDProducers to finish as those providing data for the OutputModule
from FWCore.Modules.logErrorHarvester_cff import customiseLogErrorHarvesterUsingOutputCommands
process = customiseLogErrorHarvesterUsingOutputCommands(process)
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| [
"nicolas.tonon@cern.ch"
] | nicolas.tonon@cern.ch |
2dba7604593e623d6fd2c527a1c032919cf5f2e2 | 5ea45cbeeb59353282bb5bfc070ad3f4b2c4630f | /snippets/views.py | d6372e628b3b5a9b015e51f3f9cef4f320251ae6 | [
"MIT"
] | permissive | tomaszd/rest_django | 4289c7309f83d5e6300062954be00792229f3a5d | 3366406e3d96b6dfe30a5388aba6de0498cabbca | refs/heads/main | 2023-02-11T02:14:48.658417 | 2021-01-09T01:10:10 | 2021-01-09T01:10:10 | 328,037,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | py | from rest_framework import generics, viewsets
from rest_framework import permissions
from rest_framework import renderers
from rest_framework.decorators import api_view, action
from rest_framework.response import Response
from rest_framework.reverse import reverse
from snippets.models import Snippet
from snippets.permissions import IsOwnerOrReadOnly
from snippets.serializers import SnippetSerializer
from snippets.serializers import UserSerializer
from django.contrib.auth.models import User
from rest_framework import viewsets
@api_view(['GET'])
def api_root(request, format=None):
return Response({
'users': reverse('user-list', request=request, format=format),
'snippets': reverse('snippet-list', request=request, format=format)
})
class SnippetViewSet(viewsets.ModelViewSet):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions.
Additionally we also provide an extra `highlight` action.
"""
queryset = Snippet.objects.all()
serializer_class = SnippetSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly]
@action(detail=True, renderer_classes=[renderers.StaticHTMLRenderer])
def highlight(self, request, *args, **kwargs):
snippet = self.get_object()
return Response(snippet.highlighted)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
This viewset automatically provides `list` and `retrieve` actions.
"""
queryset = User.objects.all()
serializer_class = UserSerializer
| [
"tomasz.dudziec@redembedded.com"
] | tomasz.dudziec@redembedded.com |
df75da1c4cd20551725bfd015ced8926d141dbca | 62e4030268aa2835a4806864cb70055675724471 | /docs/conf.py | 1016182d33de160fe22db4afafdff318e041a7bc | [] | no_license | aisis/FoxDotCode | 291507fe16f5a56b8fed312827712db213d78e83 | 186175f76873771e13b4aa1fa714201ab98c4efe | refs/heads/master | 2021-01-15T13:35:20.524593 | 2016-03-13T13:16:44 | 2016-03-13T13:16:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,385 | py | # -*- coding: utf-8 -*-
#
# FoxDot documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 21 22:04:59 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FoxDot'
copyright = u'2016, Ryan Kirkbride'
author = u'Ryan Kirkbride'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0'
# The full version, including alpha/beta/rc tags.
release = u'1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'FoxDotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FoxDot.tex', u'FoxDot Documentation',
u'Ryan Kirkbride', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'foxdot', u'FoxDot Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'FoxDot', u'FoxDot Documentation',
author, 'FoxDot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Path to Python files
sys.path.insert(0,"D:\\Google Drive\\GitHub\\FoxDotCode\\")
| [
"ryankirkbride26@gmail.com"
] | ryankirkbride26@gmail.com |
49306970580db9ada7e0fe1475f50d1b04f37a57 | b66e43ad7128a62ebbfacc0cce19386e2d273090 | /image_server/get_latest_distro.py | 7d111b27e930a2c4398b7039c98bbdcb1585b417 | [] | no_license | dorkamotorka/ImageBuild | 4e22c26eed40a6dec4650c4145bd45ec3f28e40b | fb99981017cb1692cc442e3460ed519b12ad6061 | refs/heads/master | 2023-08-19T14:31:45.585481 | 2021-10-18T07:27:17 | 2021-10-18T07:27:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,290 | py | #!/usr/bin/env python
import sys
import xmltodict
from urllib.request import urlopen
import os
import subprocess
import dateutil.parser
from os import path
def execute(command, cwd):
proc = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
return proc.communicate()[0], proc.returncode
class accessDatabase:
def __init__(self, package):
self.url = "https://ubiquity-pi-image.sfo2.cdn.digitaloceanspaces.com/"
self.package_tag = package
self.home = "/home/ubuntu"
def connects_to_url(self):
data, code = execute('timeout 20s wget -q --spider ' + self.url, "/tmp/")
return code == 0
def fetch_latest_online(self):
# try to fetch the latest package file from web
if not self.connects_to_url():
msg = "Could not connect to " + self.url
print(msg)
return 0
try:
file = urlopen(self.url)
data = file.read()
file.close()
data = xmltodict.parse(data)["ListBucketResult"]["Contents"]
except:
print("Fetching latest: HTTP Error 503: Service Unavailable.")
execute("sudo date -s \"$(wget -qSO- --max-redirect=0 google.com 2>&1 | grep Date: | cut -d' ' -f5-8)Z\"", "/tmp/")
return 0
allpackages = []
for val in data:
allpackages.append({'date': dateutil.parser.parse(str(val["LastModified"])), 'file': str(val["Key"]),})
packages = []
for val in allpackages:
if self.package_tag in val["file"]:
packages.append(val)
if len(packages) == 0:
msg = "No package file found online with package tag: "+self.package_tag
print(msg)
return 0
newlist = sorted(packages, key=lambda k: k['date'])
latest = str(newlist[len(newlist)-1]["file"])
return latest
def is_pkg_different_then_current(self, pkg_name):
if not path.exists(self.home):
print("Directory "+self.home+" does not exist")
return -1
# Check if package already installed in the home directory
if os.path.exists(os.path.join(self.home, pkg_name)):
return False
return True
def trigger_download(self, package_name):
self.update_command_list(package_name)
execute_success, message = self.execute_command_list()
if execute_success:
print("Successfully downloaded new distribution")
else:
print("There was an error downloading update: %s", message)
return
def execute_command_list(self):
for command in self.commandlist:
# print out the message of the command
print(command[2])
# execute the comand
streamdata, code = execute(command[0], command[1])
streamdata = str((str(streamdata[1000:]) + '..') if len(streamdata) > 1000 else streamdata)
# print out the response of the command
if len(streamdata) > 0:
print(streamdata)
# if there is a error in the execution, return false
if code != 0:
msg = "Could not exectue command: " + str(command[0]) + " because: " + stretamdata
print(msg)
return False, msg
return True, "Success executing command list"
def update_command_list(self, package_name):
self.commandlist = [
# delete old zip and cyacd files in home dir
#["find . -maxdepth 1 -name '*.zip' -type f -delete", self.home, "Removing old zip file..."],
# first download to tmp and then move to home dir so package updates could not be executed with half-downloaded zip.
# also if download is stopped mid way because of network loss, the download is automatically restarted because the file was
# not dowloaded directly into ~/
["wget --no-check-certificate "+self.url+package_name, "/tmp/", "Downloading package to tmp..."],
["mv "+package_name+" "+self.home, "/tmp/", "Moving from tmp to home..."],
# unzip everything to home dir and then remove src, build devel -> so only cyacd files (and others?) remain in home dir
["unxz "+os.path.join(self.home, package_name), self.home, "Extracting package..."],
]
if __name__ == '__main__':
ad = accessDatabase('breadcrumb')
latest_fetched_package = ad.fetch_latest_online()
if latest_fetched_package == 0:
print("Could not fetch latest package from " + ad.url)
else:
print("Found package: "+latest_fetched_package+", proceeding.")
# if there was a package file fetched
if latest_fetched_package != 0:
# first compare it to the currently installed package
diff = ad.is_pkg_different_then_current(latest_fetched_package)
if diff == False:
print("The latest package is already downloaded")
sys.exit()
elif diff == True:
ad.trigger_download(latest_fetched_package)
else:
print("There was an error checking difference between installed and fetched files")
else:
print("Failed to fetch package")
| [
"tp4348@student.uni-lj.si"
] | tp4348@student.uni-lj.si |
ee8aad80ea9fe488f536a12acb866395bcbdfc70 | c26dc7928b1facac2c0912f6532076d35c19e835 | /devel/lib/python2.7/dist-packages/cob_object_detection_msgs/srv/__init__.py | 8d01ac1030bab33d482fd8bc39a91912a52446bc | [] | no_license | mattedminster/inmoov_ros | 33c29a2ea711f61f15ad5e2c53dd9db65ef6437f | e063a90b61418c3612b8df7876a633bc0dc2c428 | refs/heads/master | 2021-01-23T02:39:36.090746 | 2017-08-09T02:56:42 | 2017-08-09T02:56:42 | 85,995,826 | 0 | 0 | null | 2017-03-23T20:45:32 | 2017-03-23T20:45:32 | null | UTF-8 | Python | false | false | 309 | py | from ._AcquireObjectImage import *
from ._BaTestEnvironment import *
from ._BagTrainObject import *
from ._ComputeGraspsVacuumGripper import *
from ._DetectObjects import *
from ._SaveRecordedObject import *
from ._StartObjectRecording import *
from ._StopObjectRecording import *
from ._TrainObject import *
| [
"mattedminster@gmail.com"
] | mattedminster@gmail.com |
9184b987dcf3cf5d222de59fba7989facf1216c2 | 72a3c7f7b7b5a4a533fa1b53b03b56630779ab87 | /common/urls.py | f47028742b482f03fd1fd87b8984e253461052ba | [] | no_license | sdarbucks/pyweb | 7cb8b84b909ffa5f5dd86131cd7cfd6c01a565b7 | bfb8c92d65d252d06d05c3b3e01a7d0a63dca59e | refs/heads/main | 2023-08-25T18:58:43.250648 | 2021-10-22T01:54:05 | 2021-10-22T01:54:05 | 415,763,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
app_name = 'common'
urlpatterns = [
# 127.0.0.1:8000/common/login
path('login/', auth_views.LoginView.as_view(
template_name='common/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(), name='logout'),
path('signup/', views.signup, name='signup'),
] | [
"kdh797979@naver.com"
] | kdh797979@naver.com |
b6c7bc0863d3be11b0c5fdaf4028d0651061b62a | 3ee0418421955d01558b1c623def251932bcfc01 | /python-examples/marble_sort/write_json.py | b3388c9cc682286c4a2476f1d08641cbb8ddb79c | [
"MIT"
] | permissive | pep-dortmund/mindstorms | 89f426930516155bb75f52b9fdd24a0b64fc0951 | 9e6be52545e21ab8ba3bca7e1b0e64ed2320366d | refs/heads/master | 2021-01-01T19:19:26.508803 | 2017-04-29T11:39:35 | 2017-04-29T11:39:35 | 38,932,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | from argparse import ArgumentParser
import zmq
import json
parser = ArgumentParser()
parser.add_argument('outputfile')
parser.add_argument('-p', '--port', type=int, default=5000)
context = zmq.Context()
socket = context.socket(zmq.REP)
def main():
args = parser.parse_args()
socket.bind('tcp://0.0.0.0:{}'.format(args.port))
events = 0
with open(args.outputfile, 'a') as f:
while True:
data = socket.recv_pyobj()
socket.send_string('ok')
events += 1
print('Events:', events)
f.write(json.dumps(data))
f.write('\n')
if __name__ == '__main__':
main()
| [
"maximilian.noethe@tu-dortmund.de"
] | maximilian.noethe@tu-dortmund.de |
56583f3316a24edddd70b4a0f9c935cbd4ceb946 | 3b79a802f8dd9f26bee0bfde4630ac0cab932803 | /srcSegcls/getEventSegDF.py | b004b92f2a243f693794a4efdb8cca0d07350ef9 | [] | no_license | qolina/Twevent | 87fc4706564088361e9db6ddc44efc10647e67fe | 4b90b0604493b20dee90448c17e0a8e0d557165e | refs/heads/master | 2021-06-24T19:06:02.022882 | 2017-08-15T05:20:09 | 2017-08-15T05:20:09 | 100,341,172 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,338 | py | #! /usr/bin/env python
#coding=utf-8
import time
import re
import os
import math
import cPickle
############################
## load tweetID-usrID
def loadUsrId(filepath):
usrFile = file(filepath,"r")
tweIdToUsrIdHash = cPickle.load(usrFile)
usrFile.close()
return tweIdToUsrIdHash
############################
## load event segments from file
def loadEvtseg(filePath):
unitHash = {}#segment:segmentID(count from 0)
inFile = file(filePath)
unitID = 0
while True:
lineStr = inFile.readline()
lineStr = re.sub(r'\n', ' ', lineStr)
lineStr = lineStr.strip()
if len(lineStr) <= 0:
break
contentArr = lineStr.split("\t")
unit = contentArr[2]
unitHash[unit] = unitID
unitID += 1
inFile.close()
print "### " + str(len(unitHash)) + " event " + UNIT + "s are loaded from " + inFile.name
return unitHash
############################
## getEventSegment's df
def getEventSegmentDF(dataFilePath, toolDirPath):
fileList = os.listdir(dataFilePath)
for item in sorted(fileList):
if item.find("segged") != 0:
continue
print "### Processing " + item
seggedFile = file(dataFilePath + item)
tStr = item[len(item)-2:len(item)]
print "Time window: " + tStr
eventSegFilePath = dataFilePath + "event" + UNIT + tStr
unitHash = loadEvtseg(eventSegFilePath)
eventSegDFFile = file(dataFilePath + "event" + UNIT + "DF" + tStr, "w")
unitDFHash = {} # unit:dfhash
N_t = 0
Usr_t = 0
usrHash = {}
unitUsrHash = {}
tweToUsrFilePath = toolDirPath + "tweIdToUsrId" + tStr
tweIdToUsrIdHash = loadUsrId(tweToUsrFilePath)
while True:
lineStr = seggedFile.readline()
lineStr = re.sub(r'\n', " ", lineStr)
lineStr = lineStr.strip()
if len(lineStr) <= 0:
break
contentArr = lineStr.split("\t")
tweetIDstr = contentArr[0]
tweetText = contentArr[2]
usrIDstr = tweIdToUsrIdHash[tweetIDstr]
if len(tweetText)*len(tweetIDstr) == 0:
print "Error: empty id or text: " + tweetIDstr + "#" + tweetText
exit
N_t += 1
if usrIDstr not in usrHash:
usrHash[usrIDstr] = 1
textArr = tweetText.split("|")
for segment in textArr:
wordArr = segment.split(" ")
containslang = False
if useSegmentFlag:
unit = segment
if unit not in unitHash:
continue
# segment df
df_t_hash = {}
if unit in unitDFHash:
df_t_hash = unitDFHash[unit]
df_t_hash[tweetIDstr] = 1
unitDFHash[unit] = df_t_hash
# segment users
usr_hash = {}
if unit in unitUsrHash:
usr_hash = unitUsrHash[unit]
usr_hash[usrIDstr] = 1
unitUsrHash[unit] = usr_hash
else:
for word in wordArr:
unit = word
if unit not in unitHash:
continue
# word df
df_t_hash = {}
if unit in unitDFHash:
df_t_hash = unitDFHash[unit]
df_t_hash[tweetIDstr] = 1
unitDFHash[unit] = df_t_hash
# word users
usr_hash = {}
if unit in unitUsrHash:
usr_hash = unitUsrHash[unit]
usr_hash[usrIDstr] = 1
unitUsrHash[unit] = usr_hash
if N_t % 100000 == 0:
print "### " + str(time.asctime()) + " " + str(N_t) + " tweets are processed!"
windowHash[tStr] = N_t
Usr_t = len(usrHash)
cPickle.dump(N_t, eventSegDFFile)
cPickle.dump(Usr_t, eventSegDFFile)
cPickle.dump(unitDFHash, eventSegDFFile)
cPickle.dump(unitUsrHash, eventSegDFFile)
for unit in unitDFHash:
print unit + "\t" + str(len(unitDFHash[unit]))
print "### " + str(time.asctime()) + " " + str(len(unitHash)) + " event " + UNIT + "s DF/UsrDF are calculated and writen to " + eventSegDFFile.name
seggedFile.close()
eventSegDFFile.close()
############################
## main Function
global useSegmentFlag, UNIT
print "###program starts at " + str(time.asctime())
#dataFilePath = r"../Data_hfmon/segged_qtwe/"
dataFilePath = r"../Data_hfmon/segged_ltwe/"
#dataFilePath = r"../Data_hfmon/segged_ltwe_hash/"
# use segment or word as unit
useSegmentFlag = True
if useSegmentFlag:
UNIT = "segment"
else:
UNIT = "word"
toolDirPath = r"../Tools/"
windowHash = {} # timeSliceIdStr:tweetNum
getEventSegmentDF(dataFilePath, toolDirPath)
print "###program ends at " + str(time.asctime())
| [
"qolina@gmail.com"
] | qolina@gmail.com |
c85113890b4775751eea8a0787ac818401ea92d5 | c660fdd49861211926a9dac0206d3856002ff2a8 | /smbl/prog/plugins/samtools.py | e203b8094d9a9201ecb7919fbc2f9595a2242875 | [
"MIT"
] | permissive | hermanzhaozzzz/smbl | d493a8b7ecfaf961c7ca7280d94c945a3e4e3b92 | 5922fa2fc4060d86172e991361a1cceb0af51af8 | refs/heads/master | 2021-06-23T11:27:57.869235 | 2017-08-19T02:21:51 | 2017-08-19T02:21:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | import smbl
import snakemake
import os
from ._program import *
SAMTOOLS = get_bin_file_path("samtools")
##########################################
##########################################
class SamTools(Program):
@classmethod
def get_installation_files(cls):
return [
SAMTOOLS,
]
@classmethod
def install(cls):
gitdir_samtools=cls.git_clone("http://github.com/samtools/samtools","samtools")
gitdir_htslib=cls.git_clone("http://github.com/samtools/htslib","htslib")
smbl.prog.correct_samtools_make(os.path.join(gitdir_samtools,"Makefile"))
cls.run_make("samtools")
cls.install_file("samtools/samtools",SAMTOOLS)
@classmethod
def supported_platforms(cls):
return ["cygwin","osx","linux"]
| [
"karel.brinda@gmail.com"
] | karel.brinda@gmail.com |
0d1ed554911585ef093b2a5e0d0b9f8eab5a70c3 | 5962cf5c30d69b4b57d2ec598e11c3a81c6df083 | /old/frontend_mpl_basemap.py | c5619e6296c99f49301064adc4a61433adac319e | [] | no_license | sergeimoiseev/othodi_code | 1584f4006c2bddd8ddbbc6e7439b782c1f93c313 | 87f11374fc1f332752d426af4e047306aefcbd81 | refs/heads/master | 2021-01-10T04:17:45.407696 | 2016-01-26T20:23:59 | 2016-01-26T20:23:59 | 48,580,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,318 | py | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib
# matplotlib.use('nbagg')
# import matplotlib.pyplot as plt
# import matplotlib.cm as cm
# import mpld3
# matplotlib.use('nbagg')
def plot_route(coord_pairs,annotes):
# matplotlib.use('nbagg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
MIN_L_WIDTH=10
POINT_SIZE=2*MIN_L_WIDTH
fig = plt.figure("caption",figsize=(10,10))
ax = fig.add_subplot(111)
# colors_list = cm.rainbow(np.linspace(0,1,len(coord_pairs)))
ax.plot(*zip(*coord_pairs),ls='-',marker='o',ms=POINT_SIZE,lw=MIN_L_WIDTH,alpha=0.5,solid_capstyle='round',color='r')
for i, txt in enumerate(annotes):
ax.annotate(txt, (coord_pairs[i][0],coord_pairs[i][1]), xytext=(POINT_SIZE/2,POINT_SIZE/2), textcoords='offset points')
# ax.annotate(txt, (coord_pairs[i][0],coord_pairs[i][1]), xytext=(1,1))
ax.set_xlim([0.9*min(zip(*coord_pairs)[0]),1.1*max(zip(*coord_pairs)[0])]) # must be after plot
ax.set_ylim([0.9*min(zip(*coord_pairs)[1]),1.1*max(zip(*coord_pairs)[1])])
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# mpld3.show() # bad rendering
plt.show()
# plot_route(coord_pairs,annotations)
# plot_route(list_of_coords_pairs,annotes4points)
from mpl_toolkits.basemap import Basemap
def plot_route_on_basemap(coord_pairs,annotes,added_points_param_list=None):
matplotlib.use('nbagg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# matplotlib.use('nbagg')
fig=plt.figure(figsize=(16,12))
ax=fig.add_axes([0.05,0.05,0.95,0.95])
lat_list, lng_list = zip(*coord_pairs)
# setup mercator map projection.
m = Basemap(llcrnrlon=min(lng_list)-2,llcrnrlat=min(lat_list)-2,urcrnrlon=max(lng_list)+2,urcrnrlat=max(lat_list)+2,\
rsphere=(6378137.00,6356752.3142),\
resolution='l',projection='merc',\
lat_0=0.,lon_0=0.,lat_ts=0.)
MIN_L_WIDTH=7
POINT_SIZE=2*MIN_L_WIDTH
m.drawcoastlines()
m.fillcontinents()
x_all=[]
y_all=[]
for i,point in enumerate(coord_pairs):
lon = point[-1]
lat = point[0]
x,y = m(*[lon,lat])
x_all.append(x)
y_all.append(y)
if (i!=0 and i!=len(annotes)-1):
plt.annotate(annotes[i], xy=(x,y), xytext=(POINT_SIZE/2,POINT_SIZE/2), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec="none"))
plt.annotate(annotes[-1], xy=(x_all[-1],y_all[-1]), xytext=(POINT_SIZE/2,POINT_SIZE), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7)))
plt.annotate(annotes[0], xy=(x_all[0],y_all[0]), xytext=(POINT_SIZE/2,POINT_SIZE), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7)))
plt.plot(x_all,y_all,ls='-',marker='o',ms=POINT_SIZE,lw=MIN_L_WIDTH,alpha=0.5,solid_capstyle='round',color='r')
#----
# plt, m = add_points_to_basemap_plot(plt,m,[1,1])
#----
with open("x.txt",'w') as f:
pass
if added_points_param_list!=None:
added_points_coords = added_points_param_list[0]
names = added_points_param_list[1]
# x_added=[]
# y_added=[]
for i,point in enumerate(added_points_coords):
lat = point[0]
lon = point[-1]
x,y = m(*[lon,lat])
# x_added.append(x)
# y_added.append(y)
# if (i!=0 and i!=len(names)-1):
# plt.annotate(names[i], xy=(x,y), xytext=(POINT_SIZE/2,POINT_SIZE/2), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.5, 0.7), ec="none"))
plt.annotate(names[i], xy=(x,y), xytext=(0,-POINT_SIZE*2), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.5, 0.7)))
plt.plot(x,y,ls='-',marker='o',ms=POINT_SIZE,lw=MIN_L_WIDTH,alpha=0.5,solid_capstyle='round',color='pink')
with open("x.txt",'a') as f:
f.write("plotted %f,%f\n" % (x,y))
# draw parallels
m.drawparallels(np.arange(-20,0,20),labels=[1,1,0,1])
# draw meridians
m.drawmeridians(np.arange(-180,180,30),labels=[1,1,0,1])
# ax.set_title('Great Circle from New York to London')
# m.bluemarble()
plt.show()
# mpld3.show() # bad rendering
if __name__ == "__main__":
print('No test yet.') | [
"moiseev.sergei@gmail.com"
] | moiseev.sergei@gmail.com |
243a57f48705fe5f22f30fb80a4ffa1f1e9fcd60 | 81abaf1a5ed956ec22fb1a547dac1624e54e7c30 | /third-party/typeshed/stdlib/2and3/warnings.pyi | 2e95533f7fc16ff0b3a56d7f88e3f703e4099031 | [
"Unlicense",
"MIT",
"Apache-2.0"
] | permissive | joschu/blobfile | 11573372ea4749fdab6ab94421a55796e4cbffdc | 0a534f3ad33ca0ba8f5ab27d3f5e41aec2914883 | refs/heads/master | 2021-07-13T19:08:39.695706 | 2019-11-24T22:41:06 | 2019-11-24T22:41:21 | 224,252,657 | 1 | 0 | Unlicense | 2019-11-26T17:49:09 | 2019-11-26T17:49:08 | null | UTF-8 | Python | false | false | 2,353 | pyi | # Stubs for warnings
import sys
from typing import Any, Dict, List, NamedTuple, Optional, overload, TextIO, Tuple, Type, Union, ContextManager
from types import ModuleType
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
@overload
def warn(message: str, category: Optional[Type[Warning]] = ..., stacklevel: int = ...) -> None: ...
@overload
def warn(message: Warning, category: Any = ..., stacklevel: int = ...) -> None: ...
@overload
def warn_explicit(message: str, category: Type[Warning],
filename: str, lineno: int, module: Optional[str] = ...,
registry: Optional[Dict[Union[str, Tuple[str, Type[Warning], int]], int]] = ...,
module_globals: Optional[Dict[str, Any]] = ...) -> None: ...
@overload
def warn_explicit(message: Warning, category: Any,
filename: str, lineno: int, module: Optional[str] = ...,
registry: Optional[Dict[Union[str, Tuple[str, Type[Warning], int]], int]] = ...,
module_globals: Optional[Dict[str, Any]] = ...) -> None: ...
def showwarning(message: str, category: Type[Warning], filename: str,
lineno: int, file: Optional[TextIO] = ...,
line: Optional[str] = ...) -> None: ...
def formatwarning(message: str, category: Type[Warning], filename: str,
lineno: int, line: Optional[str] = ...) -> str: ...
def filterwarnings(action: str, message: str = ...,
category: Type[Warning] = ..., module: str = ...,
lineno: int = ..., append: bool = ...) -> None: ...
def simplefilter(action: str, category: Type[Warning] = ..., lineno: int = ...,
append: bool = ...) -> None: ...
def resetwarnings() -> None: ...
class _Record(NamedTuple):
message: str
category: Type[Warning]
filename: str
lineno: int
file: Optional[TextIO]
line: Optional[str]
@overload
def catch_warnings(*, record: Literal[False] = ..., module: Optional[ModuleType] = ...) -> ContextManager[None]: ...
@overload
def catch_warnings(*, record: Literal[True], module: Optional[ModuleType] = ...) -> ContextManager[List[_Record]]: ...
@overload
def catch_warnings(*, record: bool, module: Optional[ModuleType] = ...) -> ContextManager[Optional[List[_Record]]]: ...
| [
"48501609+cshesse@users.noreply.github.com"
] | 48501609+cshesse@users.noreply.github.com |
1390212b59f1a609de481080cfa340b8b55b6dfd | b144928d199550e0fd2a0a0a21224e4f463e4bc6 | /src/cmssh/filemover.py | bbc8b2abf265ef902f6c600bfe786fe4a2ff5e37 | [] | no_license | dmwm/cmssh | 84f91ca1bb401dc052dcde1f58f42ecee48a3438 | 0cd6e104185938d21b10b053479e890c9f4f3b57 | refs/heads/master | 2016-09-10T19:13:29.567153 | 2014-03-06T20:45:05 | 2014-03-06T20:45:05 | 2,615,169 | 2 | 0 | null | 2012-11-28T18:24:41 | 2011-10-20T18:23:41 | Python | UTF-8 | Python | false | false | 27,912 | py | #!/usr/bin/env python
"""Filemover cli equivalent"""
# system modules
import os
import re
import sys
import json
import stat
import time
import thread
import urllib
import urllib2
import datetime
from multiprocessing import Process
# for DBS2 XML parsing
import xml.etree.ElementTree as ET
# cmssh modules
from cmssh.iprint import print_error, print_info, print_warning
from cmssh.utils import size_format
from cmssh.ddict import DotDict
from cmssh.cms_urls import phedex_url, dbs_url, dbs_instances
from cmssh.cms_objects import CMSObj
from cmssh.utils import execmd
from cmssh.utils import PrintProgress, qlxml_parser
from cmssh.url_utils import get_data
from cmssh.sitedb import SiteDBManager
from cmssh.srmls import srmls_printer, srm_ls_printer
def get_dbs_se(lfn):
    """
    Get original SE (storage element) name from DBS for given LFN.

    Probes every configured DBS instance in turn with a DBS2
    'find site where file=<lfn>' query and returns the first site name
    found; falls through (implicitly returning None) when no instance
    knows the file.
    """
    # TODO: should have transparent access to DBS2/DBS3
    query = 'find site where file=%s' % lfn
    params = {"api":"executeQuery", "apiversion": "DBS_2_0_9", "query":query}
    # remember the currently selected instance so it can be restored below
    # NOTE(review): if DBS_INSTANCE is unset, default_instance is None and
    # the os.environ assignments below would raise TypeError -- confirm the
    # variable is always set by the cmssh environment
    default_instance = os.environ.get('DBS_INSTANCE')
    for inst in dbs_instances():
        params.update({"query":query})
        # presumably dbs_url() consults DBS_INSTANCE -- that is why it is
        # switched before each request; verify against cms_urls
        os.environ['DBS_INSTANCE'] = inst
        data = urllib2.urlopen(dbs_url(), urllib.urlencode(params))
        try:
            rec = [f for f in qlxml_parser(data, 'site')][0]
            sename = rec['site']['site']
        except:
            # no (or malformed) result from this instance, try the next one
            continue
        os.environ['DBS_INSTANCE'] = default_instance
        return sename
    os.environ['DBS_INSTANCE'] = default_instance
def file_size(ifile):
    """Return size of *ifile* in bytes, or 0 when it is not a regular file."""
    if not os.path.isfile(ifile):
        return 0
    return os.path.getsize(ifile)
def check_permission(dst, verbose=None):
"""
Check permission to write to given destination area
"""
if verbose:
print "Check permission to write to %s" % dst
srmmkdir = os.environ.get('SRM_MKDIR', '')
if not srmmkdir:
print_error('Unable to find srm mkdir command')
sys.exit(1)
cmd = '%s %s' % (srmmkdir, dst)
stdout, stderr = execmd(cmd)
if stderr.find('command not found') != -1:
print 'Unable to find srm mkdir tool'
print help
sys.exit(1)
if stdout.find('SRM-DIR: directory not created') != -1 or\
stdout.find('SRM_FAILURE') != -1:
msg = "Unable to access %s:" % dst
print msg
print "-" * len(msg)
print
print stdout
sys.exit(1)
def check_software(softlist):
"""
Perform the check that Grid middleware is installed on a node
"""
help = 'Please run with --help for more options'
for cmd in softlist:
stdout, stderr = execmd(cmd)
if not stdout:
print 'Unable to find %s' % cmd
print help
sys.exit(1)
def parser(data):
    """Parse DBS2 listFiles XML output and yield the lfn of each <file> tag."""
    for child in ET.fromstring(data):
        if child.tag == 'file':
            yield child.attrib['lfn']
def parse_srmls(data):
    """
    Parse srm-ls XML output and return the size attribute (a string) of
    the first <file> element that carries one, or None when absent.

    The tool may print arbitrary text before the XML declaration, so
    everything up to the last declaration is stripped first.
    """
    data = data.split('<?xml version="1.0" encoding="UTF-8"?>')
    data = '<?xml version="1.0" encoding="UTF-8"?>' + data[-1]
    elem = ET.fromstring(data)
    for child in elem:
        # dict.has_key is deprecated (and removed in Python 3); use `in`
        if child.tag == 'file' and 'size' in child.attrib:
            return child.attrib['size']
def lfns(run=None, dataset=None):
    """
    Yield logical file names for the provided run and/or dataset,
    queried from the DBS3 `files` API.

    :param run: restrict to a single run number (minrun == maxrun)
    :param dataset: restrict to a dataset path
    """
    url = dbs_url('files') # DBS3
    params = {'detail':'True'}
    # fix: `args` was referenced without ever being defined, which made
    # every call raise NameError at params.update(args)
    args = {}
    if run:
        args['minrun'] = run
        args['maxrun'] = run
    if dataset:
        args['dataset'] = dataset
    params.update(args)
    json_dict = get_data(url, params)
    for row in json_dict:
        yield row['logical_file_name']
def get_username(verbose=None):
"""
Get user name from provided DN
"""
# get DN from grid-proxy-info
cmd = 'grid-proxy-info'
stdout, stderr = execmd(cmd)
if stderr.find('command not found') != -1:
raise Exception(stderr)
userdn = None
try:
for line in stdout.split('\n'):
if line.find('issuer') != -1:
issuer, userdn = line.split(' : ')
except:
raise Exception('Unable to parse grid-proxy-info:\n%s' % stdout)
if verbose:
print "userdn :", userdn
if not userdn:
msg = 'Unable to determine your DN, please run grid-proxy-init'
raise Exception(msg)
mgr = SiteDBManager()
user = mgr.get_user(userdn)
return user
def nodes(select=True):
"""
Yield list of Phedex nodes, I only select T2 and below
"""
result = get_data(phedex_url('nodes'), {})
pat = re.compile('^T[0-1]_[A-Z]+(_)[A-Z]+')
lnodes = []
for row in result['phedex']['node']:
if select and pat.match(row['name']):
continue
msg = "%s, SE: %s, description %s/%s" \
% (row['name'], row['se'], row['technology'], row['kind'])
lnodes.append(msg)
lnodes.sort()
for row in lnodes:
print row
def resolve_srm_path(node, verbose=None):
    """
    Yield (result, path-match) pairs from the PhEDEx TFC of *node*,
    restricted to srmv2 lfn-to-pfn mapping rules.
    """
    result = get_data(phedex_url('tfc'), {'node': node})
    for rule in result['phedex']['storage-mapping']['array']:
        if rule['protocol'] != 'srmv2' or rule['element_name'] != 'lfn-to-pfn':
            continue
        yield (rule['result'], rule['path-match'])
def resolve_user_srm_path(node, ldir='/store/user', verbose=None):
    """
    Yield srmv2 PFNs for *ldir* on the given PhEDEx *node*.

    A full local path (e.g. /xrootdfs/cms/store/...) is normalized so
    that only the portion starting at /store/ is looked up.
    """
    lfn = '/store/' + ldir.split('/store/')[-1]
    result = get_data(phedex_url('lfn2pfn'),
                      {'node': node, 'lfn': lfn, 'protocol': 'srmv2'})
    for entry in result['phedex']['mapping']:
        yield entry['pfn']
def lfn2pfn(lfn, sename, mgr=None):
    """
    Find PFNs for given LFN and SE (storage element) name.

    The SE name is translated into a CMS node name via SiteDB, then the
    PhEDEx lfn2pfn API is queried for srmv2 PFNs.

    :param lfn: logical file name to resolve
    :param sename: storage element name known to SiteDB
    :param mgr: optional SiteDBManager instance (created when omitted)
    :returns: list of unique PFN strings (empty when nothing resolves)
    """
    pfnlist = []
    if not mgr:
        mgr = SiteDBManager()
    cmsname = mgr.get_name(sename)
    if cmsname:
        params = {'protocol':'srmv2', 'lfn':lfn, 'node':cmsname}
        result = get_data(phedex_url('lfn2pfn'), params)
        try:
            for item in result['phedex']['mapping']:
                pfn = item['pfn']
                if pfn not in pfnlist:
                    pfnlist.append(pfn)
        except:
            # unexpected payload shape; report it and return whatever
            # was collected so far
            msg = "Fail to look-up PFNs in Phedex\n" + str(result)
            print msg
    return pfnlist
def get_pfns(lfn, verbose=None):
    """
    Look-up LFN in PhEDEx and return the corresponding PFN and SE lists.

    :param lfn: logical file name
    :returns: tuple (pfnlist, selist); both empty when PhEDEx has no
              block information for the LFN
    """
    pfnlist = []
    selist = []
    params = {'se':'*', 'lfn':lfn}
    json_dict = get_data(phedex_url('fileReplicas'), params)
    ddict = DotDict(json_dict)
    if not json_dict['phedex']['block']:
        # no replicas known to PhEDEx
        return pfnlist, selist
    for fname in ddict.get('phedex.block.file'):
        for replica in fname['replica']:
            cmsname = replica['node']
            se = replica['se']
            if se not in selist:
                selist.append(se)
            # query Phedex for PFN
            params = {'protocol':'srmv2', 'lfn':lfn, 'node':cmsname}
            result = get_data(phedex_url('lfn2pfn'), params)
            try:
                for item in result['phedex']['mapping']:
                    pfn = item['pfn']
                    if pfn not in pfnlist:
                        pfnlist.append(pfn)
            except:
                # unexpected payload shape for this replica; report it
                # and continue with the next one
                msg = "Fail to look-up PFNs in Phedex\n" + str(result)
                print msg
                continue
    return pfnlist, selist
def pfn_dst(lfn, dst, verbose=None):
    """
    Look-up LFN in PhEDEx and yield (pfn, destination-file) pairs for
    further processing.

    *dst* may be a CMS node name (optionally "node:path"), a local
    directory/file path, or a file:/// URL; it is normalized into an
    SRM or file:/// destination.  *lfn* may be a local file, a
    "node:lfn" spec, or a plain LFN resolved through PhEDEx (falling
    back to DBS when PhEDEx has no replicas).
    """
    dstfname = None
    # does dst look like a CMS node name, e.g. T2_XX_Site?
    pat = re.compile('^T[0-9]_[A-Z]+(_)[A-Z]+')
    if pat.match(dst):
        dst_split = dst.split(':')
        dst = dst_split[0]
        if len(dst_split) == 1: # copy to the node
            # NOTE(review): when len(dst_split) == 1 there is no second
            # element, so dst_split[1] raises IndexError -- the condition
            # looks inverted (== 2 intended?); confirm with callers
            local_path = dst_split[1]
            # resolve node name into an SRM path via the site TFC rules
            for srm_path, lfn_match in resolve_srm_path(dst, verbose):
                lfn_pat = re.compile(lfn_match)
                if lfn_pat.match(lfn):
                    srm_path = srm_path.replace('\?', '?').replace('$1', local_path)
                    if verbose:
                        print "Resolve %s into %s" % (dst, srm_path)
                    dst = srm_path
        else:
            # bare node given: target the user area /store/user/<username>
            paths = [p for p in resolve_user_srm_path(dst, verbose=verbose)]
            dst = '%s/%s' % (paths[0], get_username())
        check_permission(dst, verbose)
    else:
        # local destination: normalize into a file:/// URL
        if dst.find('file:///') == -1:
            dstfname = dst.split('/')[-1]
            if dstfname == '.':
                dstfname = None
            if dst[0] == '/': # absolute path
                if os.path.isdir(dst):
                    ddir = dst
                    dstfname = None
                else:
                    ddir = '/'.join(dst.split('/')[:-1])
                    if not os.path.isdir(ddir):
                        msg = 'Provided destination directory %s does not exists' % ddir
                        raise Exception(msg)
                dst = 'file:///%s' % ddir
            else:
                # relative path: anchor it in the current working directory
                ddir = '/'.join(dst.split('/')[:-1]).replace('$PWD', os.getcwd())
                if os.path.isdir(ddir):
                    dst = 'file:///%s' % os.path.join(os.getcwd(), ddir)
                else:
                    dst = 'file:///%s' % os.getcwd()
    pfnlist = []
    if os.path.isfile(lfn) or lfn.find('file:///') != -1: # local file
        # source is a local file: build its file:/// PFN directly
        pfn = lfn.replace('file:///', '')
        if pfn[0] != '/':
            pfn = 'file:///%s' % os.path.join(os.getcwd(), pfn)
        else:
            pfn = 'file:///%s' % pfn
        pfnlist = [pfn]
    else:
        if lfn.find(':') != -1:
            # "node:lfn" spec: resolve directly on that node
            node, lfn = lfn.split(':')
            params = {'node':node, 'lfn':lfn, 'protocol':'srmv2'}
            method = 'lfn2pfn'
        else:
            # plain LFN: ask PhEDEx for all replicas
            params = {'se':'*', 'lfn':lfn}
            method = 'fileReplicas'
        json_dict = get_data(phedex_url(method), params)
        ddict = DotDict(json_dict)
        if verbose:
            print "Look-up LFN:"
            print lfn
        phedex = json_dict['phedex']
        if phedex.has_key('mapping'):
            # lfn2pfn response shape
            if not phedex['mapping']:
                msg = "LFN: %s\n" % lfn
                msg += 'No replicas found\n'
                msg += str(json_dict)
                raise Exception(msg)
            filelist = ddict.get('phedex.mapping.pfn')
            if not filelist:
                filelist = []
            if isinstance(filelist, basestring):
                filelist = [filelist]
            for fname in filelist:
                pfnlist.append(fname)
        elif phedex.has_key('block') and not phedex['block']:
            # fileReplicas returned an empty block list: fall back to the
            # original SE recorded in DBS
            msg = 'No replicas found in PhEDEx, will try to get original SE from DBS'
            print_warning(msg)
            sename = get_dbs_se(lfn)
            msg = 'Orignal LFN site %s' % sename
            print_info(msg)
            mgr = SiteDBManager()
            pfnlist = lfn2pfn(lfn, sename, mgr)
            # NOTE(review): phedex['block'] is empty in this branch, so the
            # loop below never runs; also a non-empty block list matches
            # neither branch -- confirm this is intended
            filelist = ddict.get('phedex.block.file')
            if not filelist:
                filelist = []
            for fname in filelist:
                for replica in fname['replica']:
                    cmsname = replica['node']
                    se = replica['se']
                    if verbose:
                        print "found LFN on node=%s, se=%s" % (cmsname, se)
                    if cmsname.count('T0', 0, 2) == 1:
                        continue # skip T0's
                    # query Phedex for PFN
                    params = {'protocol':'srmv2', 'lfn':lfn, 'node':cmsname}
                    result = get_data(phedex_url('lfn2pfn'), params)
                    try:
                        for item in result['phedex']['mapping']:
                            pfn = item['pfn']
                            if pfn not in pfnlist:
                                pfnlist.append(pfn)
                    except:
                        msg = "Fail to look-up PFNs in Phedex\n" + str(result)
                        print msg
                        continue
    if verbose > 1:
        print "PFN list:"
        for pfn in pfnlist:
            print pfn
    # finally return pfn and dst paths w/ file for further processing
    for item in pfnlist:
        ifile = item.split("/")[-1] if not dstfname else dstfname
        yield item, '%s/%s' % (dst, ifile)
def get_size(surl, verbose=None):
    """
    Execute srm-ls <surl> command and retrieve file size information.

    Local file:/// URLs are measured directly via file_size; remote SURLs
    are measured by parsing the output of the SRM_LS tool.  Returns the
    size (int for local files, string otherwise) or 0 on failure.
    """
    srmls = os.environ.get('SRM_LS', '')
    if not srmls:
        print_error('Unable to find srm ls tool')
        sys.exit(1)
    # the srm-ls flavor needs no extra arguments; other tools get -2
    if srmls.find('srm-ls') != -1:
        srmargs = ''
    else:
        srmargs = '-2'
    cmd = '%s %s %s' % (srmls, srmargs, surl)
    if verbose:
        print_info(cmd)
    if cmd.find('file:///') != -1:
        # local file: measure it directly, no need to shell out
        return file_size(cmd.split('file:///')[-1])
    stdout, stderr = execmd(cmd)
    if verbose:
        print_info(stdout + stderr)
    orig_size = 0
    if cmd.find('file:///') != -1: # srm-ls returns XML
        # NOTE(review): this branch is unreachable -- the same condition
        # already returned above; kept as-is
        if srmls.find('srm-ls') != -1:
            orig_size = parse_srmls(stdout)
        else:
            try:
                orig_size = stdout.split()[0].strip()
            except:
                return 0
    else:
        if srmls.find('srm-ls') != -1:
            # srm-ls prints a line containing "Bytes" with "=<size>"
            for line in stdout.split('\n'):
                if line.find('Bytes') != -1:
                    orig_size = line.replace('\n', '').split('=')[-1]
        else:
            # other tools print the size as the first token of stdout
            try:
                orig_size = stdout.split()[0].strip()
            except:
                return 0
    return orig_size
def check_file(src, dst, verbose):
"""
Check if file is transfered and return dst, dst_size upon success.
"""
# find file size from replica
orig_size = get_size(src, verbose)
if verbose:
print "%s, size %s" % (src, orig_size)
if not orig_size or orig_size == 'null':
return False
# find file size from destination (if any)
dst_size = get_size(dst, verbose)
if verbose:
print "%s, size %s" % (dst, dst_size)
if not dst_size or dst_size == 'null':
return False
if int(orig_size) == int(dst_size):
return (dst, int(dst_size))
return False
def execute(cmds, src, dst, verbose):
"""
Execute given command, but also check if file is in place at dst
"""
status = check_file(src, dst, verbose)
if status:
return status
else:
if isinstance(cmds, basestring):
stdout, stderr = execmd(cmds)
if verbose:
print_info('Output of %s' % cmd)
print stdout + stderr
status = check_file(src, dst, verbose)
elif isinstance(cmds, list):
for cmd in cmds:
if not cmd:
continue
stdout, stderr = execmd(cmd)
if verbose:
print_info('Output of %s' % cmd)
print stdout + stderr
status = check_file(src, dst, verbose)
if status:
return status
return status
def active_jobs(queue):
    """Return how many queue entries hold a still-running process."""
    return sum(1 for proc, _status in queue.values() if proc.is_alive())
def worker(queue, threshold):
    """
    Background worker which starts queued transfer processes while
    keeping the number of concurrently running jobs below *threshold*.

    Runs forever, polling every 5 seconds; meant to be launched in a
    separate thread (see FileMover.__init__).

    :param queue: dict of lfn -> (process, status) shared with FileMover
    :param threshold: maximum number of simultaneously alive processes
    """
    while True:
        njobs = active_jobs(queue)
        if njobs < threshold:
            # start process
            for lfn, (proc, status) in queue.items():
                # re-check each iteration: processes started above may
                # have filled the available slots
                if active_jobs(queue) >= threshold:
                    break
                # only start entries never started before (status None)
                if not status and not proc.is_alive():
                    proc.start()
                    queue[lfn] = (proc, 'started')
        time.sleep(5)
class FileMover(object):
def __init__(self):
self.instance = "Instance at %d" % self.__hash__()
self.queue = {} # download queue
threshold = os.environ.get('CMSSH_TRANSFER_LIMIT', 3)
thread.start_new_thread(worker, (self.queue, threshold))
self.methods = ['xrdcp', 'lcgcp', 'srmcp']
def transfer_cmds(self, lfn, dst, verbose=0):
"Generate transfer commands"
xrdcmd = 'xrdcp root://cms-xrd-global.cern.ch/%s %s' % (lfn, dst)
if not os.path.isdir(dst):
xrdcmd = ''
srmcp = os.environ.get('SRM_CP', '')
if srmcp.find('srm-copy') != -1:
srmargs = '-pushmode -statuswaittime 30 -3partycopy -delegation false -dcau false'
else:
srmargs = '-srm_protocol_version=2 -retry_num=1 -streams_num=1 -debug'
for pfn, pdst in pfn_dst(lfn, dst, 0): # last zero is verbose=0
lcg = os.environ.get('LCG_CP', '')
if lcg:
if verbose:
vflag = '-v'
else:
vflag = ''
lcgcmd = '%s %s -b -D srmv2 %s %s' % (lcg, vflag, pfn, pdst)
else:
lcgcmd = ''
if srmcp.find('srm-copy') != -1:
srmcmd = '%s %s %s %s' % (srmcp, pfn, pdst, srmargs)
else:
srmcmd = '%s %s %s %s' % (srmcp, srmargs, pfn, pdst)
yield xrdcmd, lcgcmd, srmcmd, pfn, pdst
    def copy(self, lfn, dst, method='xrdcp', verbose=0, background=False):
        """
        Copy LFN to given destination.

        :param lfn: logical file name (or local file) to transfer
        :param dst: destination (local dir/file or node spec)
        :param method: one of self.methods ('xrdcp', 'lcgcp', 'srmcp')
        :param background: when true, queue the transfer asynchronously
        :returns: 'success', 'accepted' (queued) or 'fail'
        """
        if method not in self.methods:
            print_error('Unknown transfer method "%s"' % method)
            return 'fail'
        for xrdcmd, lcgcmd, srmcmd, pfn, pdst in self.transfer_cmds(lfn, dst, verbose):
            # pick the command matching the requested transfer method
            if method == 'xrdcp':
                cmd = xrdcmd
            elif method == 'lcgcp':
                cmd = lcgcmd
            else:
                cmd = srmcmd
            if not cmd:
                return 'fail'
            if background:
                # I need to pass list of commands for transfer method
                # for that I'll use background variable
                background = [xrdcmd, lcgcmd, srmcmd]
            status = self.transfer(cmd, lfn, pfn, pdst, verbose, background)
            if status == 'success' or status == 'accepted':
                return status
        return 'fail'
def transfer(self, cmd, lfn, pfn, pdst, verbose=0, background=False):
"""Copy LFN to given destination"""
err = 'Unable to identify total size of the file,'
err += ' GRID middleware fails.'
if not background:
bar = PrintProgress('Fetching LFN info')
if verbose:
print_info(cmd)
if background:
# here background is a list of commands
if not isinstance(background, list):
return 'fail'
proc = Process(target=execute, args=(background, pfn, pdst, 0))
self.queue[lfn] = (proc, None)
return 'accepted'
elif verbose:
status = execute(cmd, pfn, pdst, verbose)
if not status:
return 'fail'
else:
dst, dst_size = status
size = size_format(dst_size)
if not size or not dst_size:
print_error(err)
print "Status of transfer:\n", status
return 'fail'
else:
print "\nDone, file located at %s (%s)" \
% (dst, size_format(dst_size))
return 'success'
else:
ifile = pdst
pfn_size = get_size(pfn)
if pfn_size and pfn_size != 'null':
tot_size = float(pfn_size)
bar.print_msg('LFN size=%s' % size_format(tot_size))
bar.init('Download in progress:')
proc = Process(target=execute, args=(cmd, pfn, pdst, verbose))
proc.start()
while True:
if proc.is_alive():
size = get_size(ifile)
if not size or size == 'null':
bar.refresh('')
pass
else:
progress = float(size)*100/tot_size
bar.refresh(progress)
if progress == 100:
break
else:
break
time.sleep(0.5)
bar.clear()
status = check_file(pfn, pdst, verbose)
if status:
return 'success'
else:
print_error(err)
return 'fail'
return 'fail'
def list_lfn(self, lfn, verbose=0):
"""List LFN"""
pat_lfn = re.compile('^/.*\.root$')
if pat_lfn.match(lfn):
pfnlist, selist = get_pfns(arg, verbose)
for pfn in pfnlist:
print '%s %s' % (lfn, get_size(pfn, verbose))
def list_se(self, arg, verbose=0):
"""list content of given directory on SE"""
try:
node, ldir = arg.split(':')
except:
msg = 'Given argument "%s" does not represent SE:dir' % arg
raise Exception(msg)
srmls = os.environ.get('SRM_LS', '')
if not srmls:
print_error('Unable to find srm ls tool')
sys.exit(1)
dst = [r for r in resolve_user_srm_path(node, ldir)][0]
if os.environ.get('LCG_LS', ''):
cmd = "%s -l -v -b -D srmv2 %s" % (os.environ['LCG_LS'], dst)
else:
if srmls.find('srm-ls') != -1:
cmd = "%s %s -fulldetailed" % (srmls, dst)
else:
cmd = "%s -2 -l %s" % (srmls, dst)
if verbose:
print cmd
stdout, stderr = execmd(cmd)
if stderr:
print_error(stderr)
output = []
row = {}
if os.environ.get('LCG_LS', ''):
for line in stdout.split('\n'):
if line.find('SE type') != -1:
continue
output.append(line)
return '\n'.join(output)
elif srmls.find('srmls') != -1:
for line in srmls_printer(stdout, dst.split('=')[-1]):
output.append(line)
return '\n'.join(output)
else:
for line in srm_ls_printer(stdout, dst.split('=')[-1]):
output.append(line)
return '\n'.join(output)
def rm_lfn(self, arg, verbose=0):
"""Remove user lfn from a node"""
try:
node, lfn = arg.split(':')
except:
msg = 'Given argument "%s" does not represent SE:LFN' % arg
raise Exception(msg)
cmd = os.environ.get('SRM_RM', '')
dst = [r for r in resolve_user_srm_path(node)][0]
dst, path = dst.split('=')
if dst[-1] != '=':
dst += '='
for item in lfn.split('/'):
if not item or item in path:
continue
path += '/%s' % item
cmd = "%s %s" % (cmd, dst+path)
if verbose:
print cmd
try:
stdout, stderr = execmd(cmd)
if verbose:
print_info(stdout + stderr)
except:
return 'fail'
return 'success'
def rmdir(self, path, verbose=0):
"""rmdir command"""
spath = path.split(':')
if len(spath) == 1:
node = spath[0]
ldir = '/store/user'
else:
node = spath[0]
ldir = spath[1]
dst = [r for r in resolve_user_srm_path(node, ldir)][0]
cmd = '%s %s' % (os.environ.get('SRM_RMDIR', ''), dst)
if verbose:
print_info(cmd)
try:
stdout, stderr = execmd(cmd)
if verbose:
print_info(stdout + stderr)
except:
return 'fail'
return 'success'
def mkdir(self, path, verbose=0):
"""mkdir command"""
spath = path.split(':')
if len(spath) == 1:
node = spath[0]
ldir = '/store/user'
else:
node = spath[0]
ldir = spath[1]
dst = [r for r in resolve_user_srm_path(node, ldir)][0]
cmd = '%s %s' % (os.environ.get('SRM_MKDIR', ''), dst)
if verbose:
print_info(cmd)
try:
stdout, stderr = execmd(cmd)
if verbose:
print_info(stdout + stderr)
except:
return 'fail'
return 'success'
def lfn_exists(lfn, dst):
    """Check if given LFN is already present at a local destination.

    ``dst`` may be a local directory (the LFN basename is then looked up
    inside it) or a direct target file path. Non-local destinations
    (not starting with '/' or '.') and empty strings return False.
    """
    # fix: guard empty dst (the original raised IndexError on dst[0])
    if not dst or (dst[0] != '/' and dst[0] != '.'):
        return False
    fname = lfn.split('/')[-1]
    if os.path.isdir(dst):
        # fix: the original fell through to os.path.exists(dst), which is
        # True for any existing directory even when the file is absent
        return os.path.exists(os.path.join(dst, fname))
    return os.path.exists(dst)
# Shared module-level FileMover; constructing it spawns the background
# download worker thread used by the helper functions below.
FM_SINGLETON = FileMover()
def copy_lfn(lfn, dst, verbose=0, background=False, overwrite=False):
    """Copy lfn to destination.

    With ``overwrite`` any pre-existing local copy is removed first;
    otherwise an existing copy makes the call fail. Tries the method
    from CMSSH_TRANSFER_METHOD (default xrdcp) and falls back to the
    GRID middleware tools. Returns 'success', 'accepted' or 'fail'.
    """
    if overwrite:
        if os.path.isfile(dst):
            os.remove(dst)
        if lfn_exists(lfn, dst):
            if os.path.isdir(dst):
                target = os.path.join(dst, lfn.split('/')[-1])
                if os.path.exists(target):
                    os.remove(target)
    else:
        if lfn_exists(lfn, dst):
            if os.path.isdir(dst):
                fname = os.path.join(dst, lfn.split('/')[-1])
                if not os.path.exists(fname):
                    fname = None
            elif os.path.isfile(dst) and os.path.exists(dst):
                fname = dst
            else:
                fname = None
                # fix: the original dropped the '% dst' argument and
                # printed the literal format string
                print_warning('Destination %s is not local disk' % dst)
            if fname:
                print_warning('File %s already exists' % fname)
                return 'fail'
    method = os.environ.get('CMSSH_TRANSFER_METHOD', 'xrdcp')
    status = FM_SINGLETON.copy(lfn, dst, method, verbose, background)
    if status == 'fail':
        print_warning('xrdcp fails to copy file, fallback to GRID middleware mechanism')
        if os.environ.get('LCG_CP', ''):
            status = FM_SINGLETON.copy(lfn, dst, 'lcgcp', verbose, background)
        else:
            status = FM_SINGLETON.copy(lfn, dst, 'srmcp', verbose, background)
    return status
def dqueue(arg=None):
    """Return download queue"""
    # FM_SINGLETON.queue maps lfn -> (multiprocessing.Process, status)
    download_queue = FM_SINGLETON.queue
    alive = []    # transfers currently running
    waiting = []  # queued, not yet started by the worker thread
    ended = []    # finished transfers; removed from the queue below
    for lfn, (proc, status) in download_queue.items():
        if not status:
            waiting.append(lfn)
        elif proc.is_alive():
            alive.append(lfn)
        else:
            ended.append((lfn, proc.exitcode))
            # NOTE(review): deleting while iterating works on Python 2 only
            # because items() returns a list; a Python 3 port must iterate
            # over list(download_queue.items())
            del download_queue[lfn]
    print "In progress: %s jobs" % len(alive)
    if arg and arg == 'list':
        for lfn in alive:
            print lfn
        if len(alive): print
    print "Waiting : %s jobs" % len(waiting)
    if arg and arg == 'list':
        for lfn in waiting:
            print lfn
        if len(waiting): print
    print "Finished : %s jobs" % len(ended)
    if arg and arg == 'list':
        for lfn, code in ended:
            print "%s, exit code %s" % (lfn, code)
def list_lfn(lfn, verbose=0):
    """Module-level shortcut: list LFN info through the shared FileMover."""
    mover = FM_SINGLETON
    return mover.list_lfn(lfn, verbose)
def list_se(arg, verbose=0):
    """Module-level shortcut: list SE content through the shared FileMover."""
    mover = FM_SINGLETON
    return mover.list_se(arg, verbose)
def rm_lfn(lfn, verbose=0):
    """Module-level shortcut: remove an LFN through the shared FileMover."""
    mover = FM_SINGLETON
    return mover.rm_lfn(lfn, verbose)
def mkdir(dst, verbose=0):
    """Module-level shortcut: create an SE directory via the shared FileMover."""
    mover = FM_SINGLETON
    return mover.mkdir(dst, verbose)
def rmdir(dst, verbose=0):
    """Module-level shortcut: remove an SE directory via the shared FileMover."""
    mover = FM_SINGLETON
    return mover.rmdir(dst, verbose)
| [
"vkuznet@gmail.com"
] | vkuznet@gmail.com |
3ca06b13017075a0b08c397157ed7c355b8b5328 | 5f262dbebb61d8ddd67d4e605fb61c194ef47df8 | /windmill/models/schemas/__init__.py | af299db61f166b36b3980dc4fcfa6a59a5b3b757 | [
"Apache-2.0"
] | permissive | bhavaniravi/windmill | 5d157794c80cc416a122e601c07e1ee40b843b2a | 0bae5c34652d8366f6fff08ff7879d24a76c91b5 | refs/heads/master | 2021-04-04T04:54:31.841580 | 2020-03-23T11:35:22 | 2020-03-23T11:35:53 | 248,425,735 | 1 | 0 | Apache-2.0 | 2020-03-19T06:17:38 | 2020-03-19T06:17:37 | null | UTF-8 | Python | false | false | 25 | py | from . import app_schemas | [
"micsalama@gmail.com"
] | micsalama@gmail.com |
92fd71893fb2c2abeccd87ac9e76f3529a90d3d3 | 145648d14728076d3e9859ee3293b4210d21f67f | /3_lists.py | e73606d28cfdd3b0b792e2e6883113728a85f085 | [] | no_license | ankitmazumder/executeoncommand | 7d1bb9ad402a6840a3b622a7fe1680fc8fb688e6 | 4000d21b87ad4e9540a8714be820983149f70cf8 | refs/heads/master | 2023-08-22T14:47:27.937082 | 2021-09-28T16:33:25 | 2021-09-28T16:33:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | list_name = list()
print(list_name)
string_list = ["one", "two", "three"]
print(string_list)
num_list = [1, 2, 3]
print(num_list)
# Lists may mix types, including nested lists and dictionaries
mixed_list = [1, 2, 3, "one", "two", "three", [4, 5, 6], {"key":"value"}]
print(mixed_list)
vegetables = ["Potatoes", "Carrots", "Cauliflower", "Broccoli", "Bell Pepper"]
print(vegetables)
# Indexing is 0-based; negative indexes count from the end
print(vegetables[0])
print(vegetables[3])
print(vegetables[-1])
#Slicing
print(vegetables[0:3]) #2nd number is exclusive
print(vegetables[2:]) #Absence of a number after colon - assumes last position
print(vegetables[:4]) #Absence of a number before colon - assumes first position (0)
print(vegetables[-2:]) #Last 2 items
print(vegetables[::-1]) #Print list in reverse order
print(vegetables[::1])
print(vegetables[::2]) #every 2nd item
vegetables2 = ["Sweet Potatoes", "Green Beans", "Egg plant"]
#Concatenate
print(vegetables + vegetables2)
#useful functions
print(dir(vegetables))
help(vegetables.extend)
vegetables.append("Cabbage")
print(vegetables)
vegetables.pop()
print(vegetables)
vegetables.remove("Carrots")
print(vegetables)
vegetables.extend(vegetables2)
print(vegetables)
vegetables.insert(0, "Spinach")
print(vegetables)
# sort() mutates the list in place; sorted() returns a new list
vegetables.sort()
print(vegetables)
vegetables.sort(reverse=True)
sorted_vegetables = sorted(vegetables)
print(vegetables)
print(sorted_vegetables)
print(vegetables)
print(vegetables.index("Green Beans"))
# Plain assignment aliases the same list object...
vegetables3 = vegetables
print(vegetables3)
vegetables3.remove("Sweet Potatoes")
print(vegetables3)
print(vegetables)
# ...while copy() creates an independent shallow copy
vegetables3 = vegetables.copy()
print(vegetables3)
vegetables3.remove("Egg plant")
print(vegetables3)
print(vegetables)
print(vegetables.count("Spinach"))
vegetables.append("Spinach")
print(vegetables)
print(vegetables.count("Spinach"))
print(len(vegetables))
numbers = [10,20,30,40,50]
print(min(numbers))
print(max(numbers))
print(10 in numbers)
print(11 not in numbers)
vegetables3.clear()
print(vegetables3)
| [
"noreply@github.com"
] | noreply@github.com |
4ffb6ea7c83ee437d39a2753bb202e509645745e | bd0f1da30c87101267fc53386a378f847c9fd0cd | /dbgtype.py | ac3d163a57e746ae58a740154a58b7770644da17 | [
"BSD-3-Clause"
] | permissive | fengjixuchui/LKD | cf2db9bf0023d832a8e9c08982801b4218ce97c1 | f388b5f8c08b7bba2a31c5a16ea64add6cc2dd1a | refs/heads/master | 2020-04-17T02:32:43.294752 | 2015-10-21T13:59:36 | 2015-10-21T13:59:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,741 | py | # # Experimental code # #
# Idea: make 2 type
# One for field (with field name / bitfield / parentclass / etc)
# One for the type above (fieldname.type)
# That have info about array size and co
import struct
from windows.generated_def.winstructs import *
class DbgEngTypeBase(object):
    """Common plumbing shared by dbgeng type and field wrappers.

    Wraps a (module, typeid) pair together with the kdbg session used to
    query symbol information about it.
    """

    def __init__(self, module, typeid, kdbg):
        self.module = module
        self.module_name = kdbg.get_symbol(module)[0]
        self.typeid = typeid
        self.kdbg = kdbg

    def SymGetTypeInfo(self, GetType):
        """Forward a TI_GET_* query for this type to the kdbg session."""
        return self.kdbg.SymGetTypeInfo(self.module, self.typeid, GetType)

    @property
    def size(self):
        """Size in bytes as reported by the debug engine."""
        return self.kdbg.get_type_size(self.module, self.typeid)

    @property
    def type(self):
        """Underlying type (TI_GET_TYPE), wrapped as DbgEngType."""
        return DbgEngType(self.module, self.SymGetTypeInfo(TI_GET_TYPE), self.kdbg)

    @property
    def base_type(self):
        """Base type (TI_GET_BASETYPE), wrapped as DbgEngType."""
        return DbgEngType(self.module, self.SymGetTypeInfo(TI_GET_BASETYPE), self.kdbg)

    @property
    def raw_name(self):
        """Symbol name (TI_GET_SYMNAME) coerced to str."""
        return str(self.SymGetTypeInfo(TI_GET_SYMNAME))

    def __call__(self, addr):
        """Map this type onto memory at ``addr``."""
        return DbgEngtypeMapping(self, addr)
class DbgEngType(DbgEngTypeBase):
    """A dbgeng type: exposes its name, kind predicates and child fields."""

    @property
    def name(self):
        """Full printed type name as resolved by kdbg."""
        return self.kdbg.get_type_name(self.module, self.typeid)

    @property
    def is_array(self):
        """True for array types (detected from the printed name)."""
        return self.name.endswith("[]")

    @property
    def is_pointer(self):
        """True for pointer types (detected from the printed name)."""
        return self.name.endswith("*")

    @property
    def fields(self):
        """Child members, wrapped as DbgEngField objects."""
        children = self.kdbg.get_childs_types(self.module, self.typeid)
        return [DbgEngField(self.module, child, self.kdbg) for child in children.Types]

    @property
    def fields_dict(self):
        """Field name -> DbgEngField mapping."""
        return dict((field.name, field) for field in self.fields)

    @property
    def number_elt(self):
        """Element count (TI_GET_COUNT)."""
        return self.SymGetTypeInfo(TI_GET_COUNT)

    def __repr__(self):
        return '<DbgEngType "%s">' % self.name

    def __call__(self, addr):
        """Map this type onto memory at ``addr``."""
        return get_mapped_type(self, addr)
class DbgEngField(DbgEngTypeBase):
    """A member of a composite dbgeng type."""

    # a field's name is simply its symbol name
    name = DbgEngTypeBase.raw_name

    @property
    def parent(self):
        """Enclosing class/struct type (TI_GET_CLASSPARENTID)."""
        return DbgEngType(self.module, self.SymGetTypeInfo(TI_GET_CLASSPARENTID), self.kdbg)

    @property
    def offset(self):
        """Byte offset of the field inside its parent (TI_GET_OFFSET)."""
        return self.SymGetTypeInfo(TI_GET_OFFSET)

    @property
    def bitoff(self):
        """Bit position for bitfield members (TI_GET_BITPOSITION)."""
        return self.SymGetTypeInfo(TI_GET_BITPOSITION)

    def __repr__(self):
        return '<Field <%s.%s> at offset <%s> of type <%s>>' % (
            self.parent.name, self.raw_name, hex(self.offset), self.type.name)
def get_mapped_type(type, addr):
    """Map ``type`` (a DbgEngType) onto target memory at ``addr``.

    Arrays and non-void pointers get a DbgEngtypeMappingPtr, composite
    types a DbgEngtypeMapping, and field-less basic types are read from
    target memory and returned as a plain integer.
    """
    if type.is_array:
        return DbgEngtypeMappingPtr(type, addr)
    if type.is_pointer and type.name not in ["void*"]:
        return DbgEngtypeMappingPtr(type, addr)
    # basic type: no fields
    if not type.fields:
        # little-endian unsigned reads; NOTE(review): a basic type whose
        # size is not 1/2/4/8 would raise KeyError here -- confirm such
        # sizes cannot occur
        unpack_by_size = {1:"B", 2:'H', 4:'I', 8:'Q'}
        data = type.kdbg.read_virtual_memory(addr, type.size)
        return struct.unpack("<" + unpack_by_size[type.size], data)[0]
    return DbgEngtypeMapping(type, addr)
class DbgEngtypeMapping(object):
    """A composite type laid over a concrete memory address.

    Attribute access resolves the field through the debug engine and maps
    the field's type at the field's address.
    """

    def __init__(self, type, addr):
        self.type = type
        self.type_field_dict = dict((f.name, f) for f in type.fields)
        self.addr = addr
        self.kdbg = type.kdbg

    def __getattr__(self, name):
        field = self.type_field_dict.get(name)
        if field is None:
            raise AttributeError
        # TODO: bitfield handling
        return get_mapped_type(field.type, self.addr + field.offset)

    def __repr__(self):
        return "<Mapped {0} on addr {1}>".format(self.type.name, hex(self.addr))
class DbgEngtypeMappingPtr(object):
    """Mapping for array and pointer types; indexing resolves elements."""
    def __init__(self, type, addr):
        self.type = type
        self.addr = addr
        self.kdbg = type.kdbg
        if not self.type.is_array and not self.type.is_pointer:
            raise ValueError('DbgEngtypeMappingPtr on non ptr type')
    def __getitem__(self, n):
        """Return element ``n``: arrays index in place, pointers are
        dereferenced through the target first."""
        # NOTE(review): the stride uses self.type.size; for arrays the
        # element stride presumably should be self.type.type.size -- confirm
        if self.type.is_array:
            addr = self.addr + self.type.size * n
        else:
            addr = self.type.kdbg.read_ptr(self.addr)
            addr += self.type.size * n
        target_t = self.type.type
        return get_mapped_type(target_t, addr)
# Example
# >>> k = kdbg.get_type("nt", "_KPRCB")
# >>> t = k(0xfffff8016c167000)
# >>> t.WheaInfo
# 18446708889364968624L
| [
"firstname.lastname@sogeti.com"
] | firstname.lastname@sogeti.com |
cd4907ec3488eeaa4af0b6adb78c6fe463d8811d | 4142b8c513d87361da196631f7edd82f11465abb | /python/round135/219A.py | 84c2546d1739cabe735229c97479d28929b9d4e4 | [] | no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | from collections import Counter
k = int(input())
s = input()
d = Counter(s)
res = ''
# Build one of the k equal pieces: every character must occur a multiple
# of k times, contributing count // k copies to the piece.
for t in d:
    val = d[t]
    if val % k == 0:
        res += t * (val // k)
    else:
        res = '-1'
        break
if res != '-1':
    # repeat the piece k times to form a full rearrangement of s
    res *= k
print(res)
| [
"npkhanh93@gmail.com"
] | npkhanh93@gmail.com |
39404acc2db6c62f24ba2846e40a3ce78fde1adb | 23560875366953541985d881a8f767dca545c718 | /python3-virtualenv/lib/python3.6/site-packages/pip/_vendor/cachecontrol/serialize.py | 4e4baa55963e4cfc3eb1597188278eac7e7b1356 | [
"MIT"
] | permissive | GinaJame/Portfolio_MLH | cffccdfa0e7004afaf8634427b94d64359c1a488 | 541709dcf034ddca885a8b08f9922dc352c113f8 | refs/heads/master | 2023-06-21T00:27:14.795285 | 2021-07-28T01:40:29 | 2021-07-28T01:40:29 | 380,074,076 | 0 | 0 | MIT | 2021-07-28T01:40:30 | 2021-06-24T23:31:24 | Python | UTF-8 | Python | false | false | 7,090 | py | import base64
import io
import json
import zlib
from pip._vendor import msgpack
from pip._vendor.requests.structures import CaseInsensitiveDict
from .compat import HTTPResponse, pickle, text_type
def _b64_decode_bytes(b):
return base64.b64decode(b.encode("ascii"))
def _b64_decode_str(s):
return _b64_decode_bytes(s).decode("utf8")
class Serializer(object):
    """Serialize/deserialize (request, response) pairs for the HTTP cache.

    The on-disk format is a b"cc=N," version prefix followed by a
    version-specific payload; v4 (the only format ever written) is msgpack.
    Older versions are either loaded (v1, v2) or treated as cache misses.
    """

    def dumps(self, request, response, body=None):
        """Serialize *response* (cached for *request*) into the v4 format."""
        response_headers = CaseInsensitiveDict(response.headers)

        if body is None:
            body = response.read(decode_content=False)

            # NOTE: 99% sure this is dead code. I'm only leaving it
            #       here b/c I don't have a test yet to prove
            #       it. Basically, before using
            #       `cachecontrol.filewrapper.CallbackFileWrapper`,
            #       this made an effort to reset the file handle. The
            #       `CallbackFileWrapper` short circuits this code by
            #       setting the body as the content is consumed, the
            #       result being a `body` argument is *always* passed
            #       into cache_response, and in turn,
            #       `Serializer.dump`.
            response._fp = io.BytesIO(body)

        # NOTE: This is all a bit weird, but it's really important that on
        #       Python 2.x these objects are unicode and not str, even when
        #       they contain only ascii. The problem here is that msgpack
        #       understands the difference between unicode and bytes and we
        #       have it set to differentiate between them, however Python 2
        #       doesn't know the difference. Forcing these to unicode will be
        #       enough to have msgpack know the difference.
        data = {
            u"response": {
                u"body": body,
                u"headers": dict(
                    (text_type(k), text_type(v)) for k, v in response.headers.items()
                ),
                u"status": response.status,
                u"version": response.version,
                u"reason": text_type(response.reason),
                u"strict": response.strict,
                u"decode_content": response.decode_content,
            }
        }

        # Construct our vary headers
        data[u"vary"] = {}
        if u"vary" in response_headers:
            varied_headers = response_headers[u"vary"].split(",")
            for header in varied_headers:
                header = text_type(header).strip()
                header_value = request.headers.get(header, None)
                if header_value is not None:
                    header_value = text_type(header_value)
                data[u"vary"][header] = header_value

        return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)])

    def loads(self, request, data):
        """Deserialize cached *data*, dispatching on its b"cc=N" prefix.

        Returns None (a cache miss) for empty data or unknown versions.
        """
        # Short circuit if we've been given an empty set of data
        if not data:
            return

        # Determine what version of the serializer the data was serialized
        # with
        try:
            ver, data = data.split(b",", 1)
        except ValueError:
            ver = b"cc=0"

        # Make sure that our "ver" is actually a version and isn't a false
        # positive from a , being in the data stream.
        if ver[:3] != b"cc=":
            data = ver + data
            ver = b"cc=0"

        # Get the version number out of the cc=N
        ver = ver.split(b"=", 1)[-1].decode("ascii")

        # Dispatch to the actual load method for the given version
        try:
            return getattr(self, "_loads_v{}".format(ver))(request, data)

        except AttributeError:
            # This is a version we don't have a loads function for, so we'll
            # just treat it as a miss and return None
            return

    def prepare_response(self, request, cached):
        """Verify our vary headers match and construct a real urllib3
        HTTPResponse object.
        """
        # Special case the '*' Vary value as it means we cannot actually
        # determine if the cached response is suitable for this request.
        # This case is also handled in the controller code when creating
        # a cache entry, but is left here for backwards compatibility.
        if "*" in cached.get("vary", {}):
            return

        # Ensure that the Vary headers for the cached response match our
        # request
        for header, value in cached.get("vary", {}).items():
            if request.headers.get(header, None) != value:
                return

        body_raw = cached["response"].pop("body")

        headers = CaseInsensitiveDict(data=cached["response"]["headers"])
        if headers.get("transfer-encoding", "") == "chunked":
            headers.pop("transfer-encoding")

        cached["response"]["headers"] = headers

        try:
            body = io.BytesIO(body_raw)
        except TypeError:
            # This can happen if cachecontrol serialized to v1 format (pickle)
            # using Python 2. A Python 2 str(byte string) will be unpickled as
            # a Python 3 str (unicode string), which will cause the above to
            # fail with:
            #
            #     TypeError: 'str' does not support the buffer interface
            body = io.BytesIO(body_raw.encode("utf8"))

        return HTTPResponse(body=body, preload_content=False, **cached["response"])

    def _loads_v0(self, request, data):
        """v0: legacy format; always treated as a cache miss."""
        # The original legacy cache data. This doesn't contain enough
        # information to construct everything we need, so we'll treat this as
        # a miss.
        return

    def _loads_v1(self, request, data):
        """v1: pickle payload."""
        try:
            cached = pickle.loads(data)
        except ValueError:
            return

        return self.prepare_response(request, cached)

    def _loads_v2(self, request, data):
        """v2: zlib-compressed JSON with base64-encoded binary fields."""
        try:
            cached = json.loads(zlib.decompress(data).decode("utf8"))
        except (ValueError, zlib.error):
            return

        # We need to decode the items that we've base64 encoded
        cached["response"]["body"] = _b64_decode_bytes(cached["response"]["body"])
        cached["response"]["headers"] = dict(
            (_b64_decode_str(k), _b64_decode_str(v))
            for k, v in cached["response"]["headers"].items()
        )
        cached["response"]["reason"] = _b64_decode_str(cached["response"]["reason"])
        cached["vary"] = dict(
            (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
            for k, v in cached["vary"].items()
        )

        return self.prepare_response(request, cached)

    def _loads_v3(self, request, data):
        """v3: unreadable due to Python 2 encoding issues; treated as a miss."""
        # Due to Python 2 encoding issues, it's impossible to know for sure
        # exactly how to load v3 entries, thus we'll treat these as a miss so
        # that they get rewritten out as v4 entries.
        return

    def _loads_v4(self, request, data):
        """v4: msgpack payload (the current write format)."""
        try:
            cached = msgpack.loads(data, raw=False)
        except ValueError:
            return

        return self.prepare_response(request, cached)
| [
"dakshinabp@berkeley.edu"
] | dakshinabp@berkeley.edu |
6f1ae0b6a986c2ed2588756e942fa6923ab9c265 | 3958007e70f061b77c3637024f98dfc8b6187534 | /DataProvider/ModisProvider.py | 8afaedc96a67c2896e392f2aab320ba5a46eb6fc | [] | no_license | lijiao19320/ProjectTransform | d040b02c766db642cb044b0e8d550379da2789d0 | 302a2fb30fa7ae63d2ceb560ab2d1d7fbecb1385 | refs/heads/master | 2020-12-25T22:19:01.756560 | 2016-09-05T02:27:28 | 2016-09-05T02:27:28 | 64,626,631 | 0 | 0 | null | 2016-08-01T01:46:00 | 2016-08-01T01:46:00 | null | UTF-8 | Python | false | false | 10,476 | py | from DataProvider import *
from HdfOperator import *
import types
import numpy as N
from Parameters import *
from natsort import natsorted, ns
class ModisProvider(DataProvider):
    """DataProvider for MODIS (TERRA/AQUA) Level-1 HDF granules.

    Opens the L1 file, geolocation files and auxiliary HDF files through
    HdfOperator and serves per-band observation data plus auxiliary
    datasets. Granule geometry (resolution/width/height) is derived from
    the L1 file name.
    """
    def __init__(self):
        super(ModisProvider, self).__init__()
        self.__AuxiliaryDataNamesList = dict()  # logical name -> dataset name
        self.__HdfFileHandleList = dict()       # logical name -> open HDF handle
        self.__obsDataCount = 0
        self.__description = 'NULL'
        self.__BandWaveLenthList = None
        self.__HdfOperator = HdfOperator()
        self.__longitude = None
        self.__latitude = None
        self.__dataRes = 0      # ground resolution in meters
        self.__dataHeight = 0   # number of scan lines
        self.__dataWidth = 0    # pixels per line
        self.__band = 0
        self.__refbandname = None   # band -> index within EV_1KM_RefSB
        self.__emisbandname = None  # band -> index within EV_1KM_Emissive
        return
    # __dataRes = 1000
    # __dataHeight = 3660
    # __dataWidth = 1354
    # __obsDataCount = 31
    # __band = 0
    # __waveLenthlist = None
    def Dispose(self):
        """Close all open HDF handles and reset cached state."""
        self.__AuxiliaryDataNamesList.clear()
        if self.__BandWaveLenthList is not None:
            del self.__BandWaveLenthList
            self.__BandWaveLenthList = None
        # del self.__AuxiliaryDataNamesList
        for filehandle in self.__HdfFileHandleList:
            self.__HdfFileHandleList[filehandle].end()
        self.__HdfFileHandleList.clear()
        self.__description = 'NULL'
        self.__obsDataCount = 0
        super(ModisProvider, self).Dispose()
    def __InitOrbitInfo(self):
        """Fill the static part of OrbitInfo from the current geometry."""
        self.OrbitInfo.Sat = 'Modis'
        self.OrbitInfo.OrbitDirection = ''
        self.OrbitInfo.Width = self.__dataWidth
        self.OrbitInfo.Height = self.__dataHeight
        self.OrbitInfo.Date = ''
        self.OrbitInfo.Time = ''
    def OnParametersUpdate(self):
        """Refresh the band list when processing parameters change."""
        super(ModisProvider, self).OnParametersUpdate()
        self.__BandWaveLenthList = self.GetParameter().BandWaveLengthList
        self.__obsDataCount = len(self.__BandWaveLenthList)
        self.CreateBandsInfo()
        return
    def SetLonLatFile(self, latfile, lonfile):
        """Open the latitude/longitude geolocation HDF files."""
        self.__HdfFileHandleList['Latitude'] = self.__HdfOperator.Open(latfile)
        self.__HdfFileHandleList['Longitude'] = self.__HdfOperator.Open(lonfile)
    def SetL1File(self, file):
        """Open an L1 granule; sensor and geometry are parsed from its name."""
        self.__HdfFileHandleList['L1'] = self.__HdfOperator.Open(file)
        # hdf = SD(FILE_NAME, SDC.READ)
        if 'AQUA' in file:
            self.OrbitInfo.Sensor = 'AQUA'
            if '1KM' in file:
                self.__dataRes = 1000
                self.__dataWidth = 1354
                self.__dataHeight = 3660
                # self.__obsDataCount = 16
                # self.__BandWaveLenthList = ['0046', '0051', '0064', '0086', '0160', '0230', '0390', '0620', '0700', '0730',
                #                             '0860','0960','1040', '1120', '1230', '1330']
            elif 'HKM' in file:
                self.__dataRes = 500
                self.__dataWidth = 2708
                self.__dataHeight = 7320
                # self.__obsDataCount = 1
                # self.__BandWaveLenthList = ['0064']
            else:
                self.__dataRes = 250
                self.__dataWidth = 5416
                self.__dataHeight = 14640
        else:
            self.OrbitInfo.Sensor = 'TERRA'
            if '1KM' in file:
                self.__dataRes = 1000
                self.__dataWidth = 1354
                self.__dataHeight = 3660
                # self.__obsDataCount = 16
                # self.__BandWaveLenthList = ['0046', '0051', '0064', '0086', '0160', '0230', '0390', '0620', '0700', '0730',
                #                             '0860','0960','1040', '1120', '1230', '1330']
            elif 'HKM' in file:
                self.__dataRes = 500
                self.__dataWidth = 2708
                self.__dataHeight = 7320
                # self.__obsDataCount = 1
                # self.__BandWaveLenthList = ['0064']
            else:
                self.__dataRes = 250
                self.__dataWidth = 5416
                self.__dataHeight = 14640
                # self.__obsDataCount = 4
                # self.__BandWaveLenthList = ['0046', '0051', '0064', '0086']
        # else:
        #     self.__BandWaveLenthList = ['0064', '0086', '0160', '0230', '0390', '0620', '0700', '0730',
        #                                 '0860', '0960', '1040', '1120', '1230', '1330']
        #     self.__obsDataCount = 14
        # path, filename = os.path.split(file)
        # self.__description = filename.upper().replace('.HDF', '')
        self.__InitOrbitInfo()
        self.__description = self.OrbitInfo.Sat + '_' + self.OrbitInfo.Sensor + '_' + self.OrbitInfo.Date + '_' + self.OrbitInfo.Time
    # def SetL1File(self, file):
    #
    #     # self.__L1DataFileHandle = self.__HdfOperator.Open(file)
    #     self.__filehandel = self.__HdfOperator.Open(file)
    #     self.__fileName = file
    #     self.__InitOrbitInfo()
    def SetAuxiliaryDataFile(self, LNDfile, LMKfile, DEMfile, COASTfile, SATZENfile, SATAZIfile, Lonfile, Latfile):
        """Open any auxiliary files that are not 'NULL' and register their
        logical dataset names."""
        if LNDfile!='NULL':
            self.__HdfFileHandleList['LandCover'] = self.__HdfOperator.Open(LNDfile)
            self.__AuxiliaryDataNamesList['LandCover'] = 'LandCover'
        if LMKfile!='NULL':
            self.__HdfFileHandleList['Land/SeaMask'] = self.__HdfOperator.Open(LMKfile)
            self.__AuxiliaryDataNamesList['Land/SeaMask'] = 'Land/SeaMask'
        if DEMfile!='NULL':
            self.__HdfFileHandleList['DEM'] = self.__HdfOperator.Open(DEMfile)
            self.__AuxiliaryDataNamesList['DEM'] = 'DEM'
        if COASTfile!='NULL':
            self.__HdfFileHandleList['SeaCoast']= self.__HdfOperator.Open(COASTfile)
            self.__AuxiliaryDataNamesList['SeaCoast'] = 'SeaCoast'
        if SATZENfile!='NULL':
            self.__HdfFileHandleList['SensorZenith']= self.__HdfOperator.Open(SATZENfile)
            self.__AuxiliaryDataNamesList['SensorZenith'] = 'SensorZenith'
        if SATAZIfile!='NULL':
            self.__HdfFileHandleList['SensorAzimuth']= self.__HdfOperator.Open(SATAZIfile)
            self.__AuxiliaryDataNamesList['SensorAzimuth'] = 'SensorAzimuth'
        if Lonfile != 'NULL':
            self.__AuxiliaryDataNamesList['Longitude'] = 'Longitude'
        if Latfile != 'NULL':
            self.__AuxiliaryDataNamesList['Latitude'] = 'Latitude'
        return
    def CreateBandsInfo(self):
        """Name bands EVB1..EVBn and tag each as reflective or emissive.

        Bands with a wavelength code below 2135 are tagged 'REF', the
        rest 'EMIS' (codes are strings such as '0046'; exact unit not
        visible here -- presumably 10 nm steps, TODO confirm).
        """
        index = 1
        for wavelength in self.__BandWaveLenthList:
            self.OrbitInfo.BandsWavelength['EVB'+str(index)] = wavelength
            if int(wavelength) < 2135:
                self.OrbitInfo.BandsType['EVB'+str(index)] = 'REF'
            else:
                self.OrbitInfo.BandsType['EVB'+str(index)] = 'EMIS'
            index = index+1
    def GetLongitude(self):
        """Return the longitude dataset."""
        return self.GetDataSet('Longitude')
    def GetLatitude(self):
        """Return the latitude dataset."""
        return self.GetDataSet('Latitude')
    def GetResolution(self):
        """Return the ground resolution in meters."""
        return self.__dataRes
    def GetOBSData(self, band):
        """Return observation data for ``band`` (e.g. 'EVB1')."""
        self.__band = band
        (self.__refbandname, self.__emisbandname) = self.__GetOBSDatasetName(band)
        ret = None
        # NOTE(review): both branches below are identical; the split looks
        # like a leftover from band-type specific handling -- confirm
        if band in self.__refbandname:
            ret=self.GetDataSet(band)
        else:
            ret = self.GetDataSet(band)
        return ret
    def __GetOBSDatasetName(self, band):
        """Split bands into reflective/emissive groups, natural-sort each
        group, and return (refname->index, emisname->index) dicts."""
        self.refBand = dict()
        self.emisBand = dict()
        self.refBandname = dict()
        self.emisBandname = dict()
        # NOTE(review): the loop variable shadows the ``band`` parameter,
        # which is never used in this method
        for band in self.OrbitInfo.BandsType:
            if self.OrbitInfo.BandsType[band] == 'REF':
                self.refBand[band] = self.OrbitInfo.BandsType[band]
            else:
                self.emisBand[band] = self.OrbitInfo.BandsType[band]
        self.refBand = natsorted(self.refBand, alg=ns.IGNORECASE)
        self.emisBand = natsorted(self.emisBand, alg=ns.IGNORECASE)
        refNum = 0
        for refband in self.refBand:
            self.refBandname[refband] = refNum
            refNum = refNum + 1
        emisNum = 0
        for emisband in self.emisBand:
            self.emisBandname[emisband] = emisNum
            emisNum = emisNum + 1
        return self.refBandname, self.emisBandname
    def GetOBSDataCount(self):
        """Return the number of configured observation bands."""
        return self.__obsDataCount
    def GetDataSet(self,band):
        """Read a dataset: a reflective/emissive band slice from the L1
        file, or a named auxiliary dataset from its own file.

        startLine/endLine come from the base class (presumably set by the
        tiling driver; -1 means 'whole granule' -- TODO confirm).
        """
        startLine = self.startLine
        endlLine = self.endLine
        ret = None
        (self.__refbandname, self.__emisbandname) = self.__GetOBSDatasetName(band)
        if band in self.__refbandname:
            data = self.__HdfFileHandleList['L1'].select('EV_1KM_RefSB')
            if startLine != -1 and endlLine != -1:
                ret = data[self.__refbandname[self.__band], startLine:endlLine, :]
            else:
                ret = data[self.__refbandname[self.__band], :, :]
        elif band in self.__emisbandname:
            data = self.__HdfFileHandleList['L1'].select('EV_1KM_Emissive')
            if startLine != -1 and endlLine != -1:
                ret = data[self.__emisbandname[self.__band], startLine:endlLine, :]
            else:
                ret = data[self.__emisbandname[self.__band], :, :]
        else:
            data = self.__HdfFileHandleList[band].select(band)
            # NOTE(review): uses bitwise '&' instead of 'and'; due to
            # operator precedence this is a chained comparison and does
            # not match the 'and' logic used in the branches above
            if startLine != -1 & endlLine != -1:
                ret = data[startLine:endlLine, :]
            else:
                ret = data[:, :]
        return ret
    def GetAuxiliaryData(self, dataname):
        """Return the auxiliary dataset registered under ``dataname``."""
        dsname = self.__AuxiliaryDataNamesList[dataname]
        ret = None
        if dsname == '':
            return ret
        ret = self.GetDataSet(dsname)
        return ret
    def GetAuxiliaryDataNamesList(self):
        """Return the mapping of registered auxiliary dataset names."""
        return self.__AuxiliaryDataNamesList
    #
    # def GetSensorZenith(self):
    #     return self.GetDataSet(self.__DataFileHandle,'/','NOMSatelliteZenith')
    #
    # def GetSolarAzimuth(self):
    #     return self.GetDataSet(self.__DataFileHandle,'/','NOMSunAzimuth')
    #
    # def GetSolarZenith(self):
    #     return self.GetDataSet(self.__DataFileHandle,'/','NOMSunZenith')
    # def GetEmissData(self, band):
    #     return
    # def SetInputString(self,value):
    #     self.InputString = value
    #
    # def GetInputString(self):
    #     return self.InputString
    def GetDataDescription(self):
        """Return a description string, deriving one from the parameters
        when none was built from the L1 file name."""
        if self.__description == 'NULL':
            self.__description = self.GetParameter().GetParamDescription() + '_' + str(
                self.GetParameter().ProjectResolution)
        return self.__description
| [
"21797lijiao"
] | 21797lijiao |
b7d7f1bc6ee38581a5c6d1a8bdc2310c73124071 | ad8ad2e32f8d83e4ef00bdd78ce475776258702a | /square_sum_recursion_all_variants.py | 0c8081d89c882c4062f4e2f52e4ce295d2fba650 | [
"MIT"
] | permissive | GudniNathan/square_sum | b640255f7e73bc90c9e46b7144d4b8ed74fe2447 | 917c5cf4ae92d601a1981a7a706727dfe7d727a6 | refs/heads/master | 2022-12-18T06:51:25.577352 | 2019-03-23T01:48:12 | 2019-03-23T01:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | # This variant of the program will show every single possible way to complete a Square-Sum Hamiltonian Path
# with the chosen number.
# Note that half of these are just the reverse of the other half.
import threading
import time
from timeit import default_timer as timer
print("Choose length:")
count = int(input())
sqnum = list()      # perfect squares that two numbers from 1..count can sum to
start = timer()
confirmed = list()  # completed paths collected by the worker threads
printed = list()    # paths already echoed to stdout
active = [True]     # shared mutable flag (a list so threads observe changes)
new = [False]       # set by workers when fresh results are available
li = [i for i in range(count, 0, -1)]
# candidate square sums are bounded above by (count - 1) + count
for i in range(count, 1, -1):
    if i ** 2 < count * 2:
        sqnum.append(i ** 2)
def squareSum(i):
    """Depth-first extend the partial path ``i``; record it when complete."""
    seq = i
    if len(seq) == count or not active[0]:
        confirmed.append(seq)
        new[0] = True
        return
    for s in sqnum:
        # the next number must complete a perfect square with the path's tail
        n = s - seq[-1]
        if 0 < n <= count and n not in seq:
            squareSum(seq + [n])
def check(confirmed):
    """Print any results appended since the previous call."""
    if len(confirmed):
        if new[0]:
            for seq in range(len(printed), len(confirmed)):
                print(confirmed[seq])
                printed.append(confirmed[seq])
# one search thread per possible starting number
for number in li:
    thread = threading.Thread(target=squareSum, args=([number],)).start()
    check(confirmed)
# poll until only the main thread remains
while len(threading.enumerate()) > 1:
    check(confirmed)
    time.sleep(1)
if len(confirmed) == 0:
    print("No solution was found")
else:
    c = len(list(set(map(tuple, confirmed))))
    print("Found %d solutions. That's %d excluding mirror duplicates." % (c, c / 2))
print(str(timer() - start), "sec runtime")
| [
"1493259+GudniNatan@users.noreply.github.com"
] | 1493259+GudniNatan@users.noreply.github.com |
736895652d7acd47dd0b3beb52f7fe26a8ea60e4 | 7baf7106a41a9101e2677ccf336d7163f744d4c9 | /ScreenShot_App.py | 8a3a65d51bb7f27aba1aca06cdbd9d138b05946f | [] | no_license | nirsa1001/Python_Projects | 39b5c6a0cd9d9570380ee9aa575742b6dd987357 | d73f23d82fe06a1c3737b0c08d7be2ac62199019 | refs/heads/master | 2022-11-17T22:58:52.715689 | 2020-07-18T16:41:41 | 2020-07-18T16:41:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | import time
import pyautogui
import tkinter as tk
def screenshot():
    """Grab the full screen, save it under a millisecond timestamp, and preview it."""
    stamp = int(round(time.time() * 1000))
    # Update the Directory path below to where the screenshots will be saved.
    target = 'C:/Users/gsc-30431/PycharmProjects/test1.py/Screenshot_App/{}.png'.format(stamp)
    shot = pyautogui.screenshot(target)
    shot.show()
# Minimal two-button Tk window: take a screenshot, or quit.
root = tk.Tk()
frame = tk.Frame(root)
frame.pack()
# Button that captures and previews a screenshot via screenshot().
button = tk.Button(
    frame,
    text="Take Screenshot",
    command=screenshot)
button.pack(side=tk.LEFT)
# Button that exits the application.
close = tk.Button(
    frame,
    text="Quit",
    command=quit)
close.pack(side=tk.LEFT)
root.mainloop()  # blocks until the window is closed
| [
"noreply@github.com"
] | noreply@github.com |
47ede935441605d7d56f33de91b7e10d1f544291 | 930309163b930559929323647b8d82238724f392 | /sumitb2019_c.py | 8ebf6c2adc23f64ec6e3e5122b0e1896defd65e2 | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | import itertools
# Can amount X be paid exactly with items priced 100..105 (unlimited supply)?
# Prints 1 if reachable, 0 otherwise.
X = int(input())
prices = [100, 101, 102, 103, 104, 105]
reachable = [0] * (X + 1)
reachable[0] = 1  # zero is always payable
for price in prices:
    for amount in range(price, X + 1):
        reachable[amount] = max(reachable[amount], reachable[amount - price])
print(reachable[X])
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
a44eef4aa7259c94d66a70938ae38b76bea2755e | 0761c57443d2491b00753a6545395f682be27273 | /PythonProgramming/4-20/dictionary.py | 21f380ac66cc8613b2783f781af26fdf851de376 | [] | no_license | MorgFost96/School-Projects | 842835f97c025ee97e106540f2e6f03f5fdac563 | 9c86a4133e7cb587d7ad15af8da962278636db1f | refs/heads/master | 2020-09-21T22:19:49.494044 | 2019-11-30T22:19:56 | 2019-11-30T22:19:56 | 224,951,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,920 | py | # Dictionary
# - An object that stores a collection of data.
# - Each element has two parts:
#   - Key
#   - Value
# - The key is used to locate a specific value
# ------------------
# English Dictionary
# ------------------
# - The word = Key
# - Definition = Value
#
# - emp_ID = Key
# - emp_name = Value
# -------------------
# Creating Dictionary
# -------------------
# - { key : value }
phonebook = { "Chris" : "555-1111",
              "Kate" : "555-2222",
              "John" : "555-3333" }
# ----
# Keys
# ----
# - Immutable
# - Int, String, or Tuple
# ------
# Values
# ------
# - Anything
# - Mutable
# ----------
# Dictionary
# ----------
# - Mutable
# ---------------
# Retrieve Values
# ---------------
print( phonebook )
# >>> { 'Chris' : '555-1111', 'John' : '555-3333', 'Kate' : '555-2222' }
# - On Python 3.7+ dicts preserve insertion order; on older versions the
#   order could appear to change (as shown above)
# - Cannot use index
# - Must use key to retrieve information
print( phonebook[ "Chris" ] )
# >>> "555-1111"
# - Keys are Case Sensitive
# ---------------------
# In or Not In Operator
# ---------------------
if "Chris" in phonebook:
    print( phonebook[ "Chris" ] )
elif "Chris" not in phonebook:
    print( "'Chris' was not found in 'phonebook'" )
# --------------------------
# Add Elements to Dictionary
# --------------------------
# - Cannot have duplicate keys
phonebook[ "Joe" ] = "555-4444"
print( phonebook )
# >>> { 'Chris' : '555-1111', 'John' : '555-3333', 'Kate' : '555-2222', 'Joe' : '555-4444' }
# - Assigning to an existing key replaces its value
phonebook[ "Chris" ] = "555-0123"
print( phonebook )
# >>> { 'Chris' : '555-0123', 'John' : '555-3333', 'Kate' : '555-2222', 'Joe' : '555-4444' }
# -----------------
# Deleting Elements
# -----------------
# - If the key is found, it will be removed
# - Otherwise there will be a KeyError Exception raised
del phonebook[ 'Chris' ]
print( phonebook )
# >>> { 'John' : '555-3333', 'Kate' : '555-2222', 'Joe' : '555-4444' }
# ------
# Length
# ------
num_items = len( phonebook )
print( num_items )
# >>> 3
# -----------------
# Mixing Data Types
# -----------------
test_scores = { "Kay" : [ 100, 88, 92, 89 ],
                "Luis" : [ 35, 37, 45, 57 ],
                "Sophia" : [ 85, 93, 83, 58 ],
                "Ethan" : [ 99, 98, 90, 100 ] }
print( test_scores[ "Sophia" ] )
# >>> [ 85, 93, 83, 58 ]
# ----------------------------
# Creating an Empty Dictionary
# ----------------------------
empty = {}
# -----
# Loops
# -----
for key in phonebook:
    print( key )
# >>> John
#     Kate
#     Joe
for key in phonebook:
    print( key, phonebook[ key ] )
# >>> John 555-3333
#     Kate 555-2222
#     Joe 555-4444
# -------
# Methods
# -------
# - clear()
#   - Clears all elements in a dictionary
#   - Ex. phonebook.clear()
#         print( phonebook )
#         >>> {}
# - get()
#   - Ex. value = phonebook.get( 'Kate', 'Entry not Found' )
#         print( value )
#         >>> 555-2222
# - keys()
#   - Ex. for key in phonebook.keys():
#             print( key )
#         >>> John
#             Kate
#             Joe
# - pop()
#   - Returns value with specified key then removes the key-value
#     pair from the dictionary
#   - Ex. phone_num = phonebook.pop( "John", "Entry not Found" )
#         print( phone_num )
#         >>> '555-3333'
#   - John is no longer in the dictionary
# - popitem()
#   - Returns randomly selected key-value pair then removes the
#     key-value pair from the dictionary
#   - Returns a tuple of ( key, value ) if assigned to one variable
#   - Ex. key, value = phonebook.popitem()
#         print( key, value )
#         >>> John 555-3333
#   - Ex. key = phonebook.popitem()
#         print( key )
#         >>> ( 'John', '555-3333' )
# - values()
#   - Returns the values
#   - Ex. for val in phonebook.values():
#             print( val )
#         >>> 555-3333
#             555-2222
#             555-4444
| [
"morgfost96@gmail.com"
] | morgfost96@gmail.com |
566469cd1b76d202192417ecad22a99c3b57c032 | 357891fd990f30bf7e7a4e5bca6f8e74ed288e07 | /seleniumdemo.py | 3f93e8691b5f75a7fa6741a1e962e8f121d4a238 | [] | no_license | shuo502/seleniumdemo | 984b93b3dc7e32ae1ba6214a69f304e5f958d447 | 9805d0f80fec7863dec223f651bac0f6d665638a | refs/heads/master | 2020-06-24T12:24:53.819201 | 2019-08-15T10:36:47 | 2019-08-15T10:36:47 | 198,962,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,403 | py | # -*- coding: utf-8 -*-
# @Time : 2019/7/25 18:36
# @Author : Yo
# @Email : shuo502@163.com
# @File : seleniumdemo.py
# @Software: PyCharm
# @models: ..
# @function: ...
# @Git: https://gitee.com/m7n9/PyCharm.git
# @Edit: yo
# path = "/chromedriver"
# driver = webdriver.Chrome(path)
# driver=driver.ChromeOptions()
# driver.add_argument('--headless')
# driver.add_argument('--no-sandbox')
# driver.add_argument('--disable-gpu')
# driver.add_argument('--disable-dev-shm-usage')
#
#yum install xvfb
# yum install Xvfb
#yum install xdpyinfo
# echo [google-chrome] >>/etc/yum.repos.d/google-chrome.repo
# echo name=google-chrome >>/etc/yum.repos.d/google-chrome.repo
# echo baseurl=http://dl.google.com/linux/chrome/rpm/stable/x86_64 >>/etc/yum.repos.d/google-chrome.repo
# echo enabled=1 >>/etc/yum.repos.d/google-chrome.repo
# echo gpgcheck=1 >>/etc/yum.repos.d/google-chrome.repo
# echo gpgkey=https://dl.google.com/linux/linux_signing_key.pub >>/etc/yum.repos.d/google-chrome.repo
# yum install google-chrome
#python -m pip install --upgrade pip
#pip install --upgrade pip
#pip install selenium
#pip install pyvirtualdisplay
#rpm -i google-chrome-beta-76.0.3809.36-1.x86_64.rpm
# Start a virtual X display when pyvirtualdisplay is available (headless
# Linux servers); otherwise fall back to the real display.
try:
    from pyvirtualdisplay import Display
    display = Display(visible=0, size=(800, 600))
    display.start()
except Exception:
    # Narrowed from a bare `except:`, which would also have swallowed
    # KeyboardInterrupt and SystemExit.
    pass
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
# 设置 chrome 二进制文件位置 (binary_location)
# 添加启动参数 (add_argument)
# 添加扩展应用 (add_extension, add_encoded_extension)
# 添加实验性质的设置参数 (add_experimental_option)
# 设置调试器地址 (debugger_address)
# Build the Chrome launch options.  (Comments translated from the original
# Chinese notes; dead/commented-out alternatives removed.)
chrome_options = Options()
chrome_options.add_argument('lang=en_US.UTF-8')             # set language to English
chrome_options.add_argument('--disable-gpu')                # Google docs suggest this flag to work around a bug
chrome_options.add_argument("--disable-extensions")         # disable extension popup menus
chrome_options.add_argument("--test-type")                  # ignore certificate errors
chrome_options.add_argument('--no-sandbox')                 # disable the sandbox security mode
chrome_options.add_argument('window-size=1920x1080')        # browser resolution (window size)
chrome_options.add_argument('--hide-scrollbars')            # hide scrollbars, for some special pages
chrome_options.add_argument('-disable-dev-shm-usage')       # avoid /dev/shm usage (original note said "no GUI")
chrome_options.add_argument("--start-maximized")            # run maximized; element lookups can fail otherwise
chrome_options.add_argument("--js-flags=--harmony")         # enable JS ES6 Harmony features
chrome_options.add_argument('--incognito')                  # incognito (private) mode
chrome_options.add_argument('--disable-infobars')           # hide the "controlled by automation" bar
chrome_options.add_argument('--ignore-certificate-errors')  # skip TLS certificate validation
# Fixed: the original used a typographic en dash ('–disable-software-rasterizer'),
# which Chrome would not recognise as a command-line switch.
chrome_options.add_argument('--disable-software-rasterizer')
extension_path = '/extension/AdBlock_v2.17.crx'  # (first dead assignment removed)
# chrome_options.add_extension(extension_path)  # add an extension (disabled)
# Spoof a desktop Safari user agent.  NOTE(review): the value begins with
# "User-Agent,Mozilla/..." in the original — the "User-Agent," prefix looks
# accidental but is kept to preserve behaviour; confirm before relying on it.
chrome_options.add_argument( "user-agent=User-Agent,Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50")
driver = webdriver.Chrome(chrome_options=chrome_options)
# Fetch the target page, let its JavaScript run, and dump the HTML twice
# 60 seconds apart; any failure is printed rather than raised.
try:
    # driver.get("http://www.hao828.com/yingyong/useragent/")
    # driver.get("http://tools.jb51.net/aideddesign/browser_info")
    # driver.get("http://baidu.com")
    driver.get("http://t.tjdcd.com/crypt/msg?id=440")
    time.sleep(60)
    print("success")
    print(driver.page_source)
    time.sleep(60)
    print(driver.page_source)
    # driver.save_screenshot('screenshot.png')
    # driver.close()  # close the window
    # driver.quit()   # close the browser
except Exception as s:
    print(s)
    # driver.close()  # close the window
    # driver.quit()   # close the browser
# driver.find_element_by_xpath("//div[text()='站点:']").click()
# Stop the virtual display if one was started.  `display` may be undefined
# (NameError) when pyvirtualdisplay failed to import above, so catch
# Exception — narrowed from a bare `except:`, which would also have
# swallowed KeyboardInterrupt/SystemExit.
try:
    display.stop()
except Exception:
    pass
# Execute JavaScript in the page: click the second login-method tab.
def run_js():
    js = '''
    document.querySelector("#J-loginMethod-tabs > li:nth-child(2)").click()
    '''
    driver.execute_script(js)
def sendkey():
    """Fill the login form and submit it, typing the password slowly.

    Uses the module-level `driver`; the credentials below are placeholders.
    """
    user = driver.find_elements_by_name("logonId")[0]
    user.send_keys("your user")
    password = driver.find_element_by_id("password_rsainput")
    password1 = 'your pass'
    # Type slowly, one character per second, to mimic a human.
    # Fixed idiom: len(password1) instead of password1.__len__().
    for i in range(len(password1)):
        # time.sleep(random.random())
        password.send_keys(password1[i])
        print("输入", password1[i])
        time.sleep(1)
    button = driver.find_element_by_id("J-login-btn")
    # Wait long enough for the page's own JS to finish before clicking.
    time.sleep(10)
    button.click()
def shuangji(driverChrome):
    """Demo of ActionChains mouse gestures, ending with a right-click
    "download" flow on the first music entry.  (Comments translated from
    the original Chinese; some XPaths are literal placeholders.)
    """
    from selenium.webdriver.common.action_chains import ActionChains
    # Mouse double-click event
    double = driverChrome.find_element_by_xpath('//*[@id="dynamicLayout_0"]/div/div/dl/dt/a')
    ActionChains(driverChrome).double_click(double).perform()
    # Drag and drop ('path1'/'path2' are placeholder XPaths)
    source = driverChrome.find_element_by_xpath('path1')
    target = driverChrome.find_element_by_xpath('path2')
    ActionChains(driverChrome).drag_and_drop(source, target).perform()
    # Move the mouse over an element
    above = driverChrome.find_element_by_xpath('//*[@id="dynamicLayout_0"]/div/div/dl/dd[2]/span/i')
    ActionChains(driverChrome).move_to_element(above).perform()
    # Mouse right-click event
    right = driverChrome.find_element_by_xpath('//*[@id="layoutMain"]/div/div[2]/div/div/div[4]/div/div/dd/div[2]')
    ActionChains(driverChrome).context_click(right).perform()
    # Click and hold ('path' is a placeholder XPath)
    left_hold = driverChrome.find_element_by_xpath('path')
    ActionChains(driverChrome).click_and_hold(left_hold).perform()
    # Double-click again
    double = driverChrome.find_element_by_xpath('//*[@id="dynamicLayout_0"]/div/div/dl/dt/a')
    ActionChains(driverChrome).double_click(double).perform()
    print("双击成功")
    # Mouse move
    above = driverChrome.find_element_by_xpath('//*[@id="dynamicLayout_0"]/div/div/dl/dd[2]/span/i')
    ActionChains(driverChrome).move_to_element(above).perform()
    print("移动成功")
    # Hierarchical locating: find the "music" text link in the left panel
    uClass = driverChrome.find_element_by_class_name('fOHAbxb')
    liList = uClass.find_elements_by_tag_name('li')  # locate the group of <li> elements
    for li in liList:
        if li.get_attribute('data-key') =='music':  # the music option
            li.click()
    # Locate the first music entry in the right panel
    musicL = driverChrome.find_element_by_class_name("NHcGw")
    musicList = musicL.find_elements_by_tag_name("dd")
    for d in musicList:
        if d.get_attribute('_position')=='0':
            print("获得第一首歌")
            #d.click()
            ActionChains(driverChrome).move_to_element(d).perform()
            ActionChains(driverChrome).context_click(d).perform()  # right-click
    # Locate the popup menu
    element1 = driverChrome.find_element_by_class_name("list")
    # Locate its items
    liEliment = element1.find_elements_by_tag_name('li')
    for li in liEliment:
        if li.text =='下载':  # the "Download" menu item
            li.click()
            print("右击成功")
#
#
# 921 source env/bin/activate
# 923 pip install selenium
# 924 pip install --upgrade pip
# 925 pip install selenium
# 926 yum install xvfb
#
# 927 pip install pyvirtualdisplay
# 929 wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
# 930 apt install ./google-chrome-stable_current_amd64.deb
# 931 yum install google-chrome
# 932 /etc/yum.repos.d/google-chrome.repo
# 933 vi /etc/yum.repos.d/google-chrome.repo
# 934 yum install google-chrome
# 935 yum install Xvfb
# 936 yum -install libXfont
# 937 pip install selenium
# 938 https://sites.google.com/a/chromium.org/chromedriver/home.
# 939 curl https://sites.google.com/a/chromium.org/chromedriver/home
# 940 wget https://chromedriver.storage.googleapis.com/75.0.3770.140/chromedriver_linux64.zip
# 941 vi seleniumdemo.py
# 942 ls
# 943 rm google-chrome-stable_current_amd64.deb -rf
# 944 unzip chromedriver_linux64.zip
# 945 ls
# 946 vi seleniumdemo.py
# 947 python seleniumdemo.py
# 948 ls
# 949 vi seleniumdemo.py
# 950 python seleniumdemo.py
# 951 source /env/bin/activate
# 952 cd /
# 953 vi s.py
# 954 cat seleniumdemo.py
# 955 vi s.py
# 956 python s.py
# 957 yum install xdpyinfo
# 958 python s.py
# 959 ls
# 960 ll
# 961 chmod chromedriver
# 962 chmod 777 chromedriver
# 963 ll
# 964 python s.py
# 965 vi s.py
# 966 python s.py
# 967 vi s.py
# 968 python s.py
# 969 vi s.py
# 970 python s.py
# 971 ls
# 972 cd env/
# 973 ls
# 974 vd bin
# 975 cd bin
# 976 ls
# 977 cp /
# 978 cp /chromedriver ./
# 979 ls
# 980 ll
# 981 python /s.py
# 982 vi /s.py
# 983 python /s.py
# 984 vi /s.py
# 985 python /s.py/
# 986 python /s.py
# 987 yum install chrome
# 988 yum
# 989 ls
# 990 yum
# 991 history
# 992 yum google-chrome
# 993 yum install google-chrome
# 994 ls
# 995 yum install google-chrome
# 996 yum remove google-chrome-unstable-77.0.3860.5-1.x86_64 already
# 997 yum search google
# 998 yum search google-chrome
# 999 cd /
# 1000 ls
# 1001 wget http://130.235.83.22/public/CentOS-7/x86_64/google.x86_64/google-chrome-beta-76.0.3809.36-1.x86_64.rpm
# 1002 ls
# 1003 rpm -i google-chrome-beta-76.0.3809.36-1.x86_64.rpm
# 1004 rm chromedriver -rf
# 1005 rm chromedriver_linux64.zip -rf
# 1006 wget https://chromedriver.storage.googleapis.com/index.html?path=76.0.3809.68/
# 1007 ls
# 1008 wget https://chromedriver.storage.googleapis.com/76.0.3809.68/chromedriver_linux64.zip
# 1009 ls
# 1010 unzip chromedriver_linux64.zip
# 1011 ls
# 1
#
# 1046 pip install pyvirtualdisplay
| [
"shuo502@163.com"
] | shuo502@163.com |
0037015439481d0e18bcfef36abb4692897ad45d | 6fad6f220c4ac5e52b7a7a082c63b81320102a2a | /proyectos/10/src/jackGrammarVisitor.py | ad1f0c487b2dbd750bda5c7d858324ed42a7349b | [] | no_license | lmvasquezg/Computer-architecture | 512ba5fc85dacaa85803a8c9e3fa1a847b1427b1 | 1352b9ce6b95f335d4d3d1e2c3080cbc4625213a | refs/heads/master | 2022-04-11T07:53:41.237720 | 2020-03-09T14:16:37 | 2020-03-09T14:16:37 | 192,032,115 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,871 | py | # Generated from jackGrammar.g4 by ANTLR 4.7.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .jackGrammarParser import jackGrammarParser
else:
from jackGrammarParser import jackGrammarParser
# This class defines a complete generic visitor for a parse tree produced by jackGrammarParser.
class jackGrammarVisitor(ParseTreeVisitor):
    """Generic visitor over jackGrammarParser parse trees.

    Auto-generated by ANTLR 4.7.1 from jackGrammar.g4 — regenerate from the
    grammar rather than hand-editing.  Every visit method defaults to
    visiting the node's children; override individual methods to add
    rule-specific behaviour.
    """

    # Visit a parse tree produced by jackGrammarParser#classNT.
    def visitClassNT(self, ctx:jackGrammarParser.ClassNTContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#classVarDec.
    def visitClassVarDec(self, ctx:jackGrammarParser.ClassVarDecContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#jackType.
    def visitJackType(self, ctx:jackGrammarParser.JackTypeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#subroutineDec.
    def visitSubroutineDec(self, ctx:jackGrammarParser.SubroutineDecContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#parameterList.
    def visitParameterList(self, ctx:jackGrammarParser.ParameterListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#subroutineBody.
    def visitSubroutineBody(self, ctx:jackGrammarParser.SubroutineBodyContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#varDec.
    def visitVarDec(self, ctx:jackGrammarParser.VarDecContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#className.
    def visitClassName(self, ctx:jackGrammarParser.ClassNameContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#subroutineName.
    def visitSubroutineName(self, ctx:jackGrammarParser.SubroutineNameContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#varName.
    def visitVarName(self, ctx:jackGrammarParser.VarNameContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#statements.
    def visitStatements(self, ctx:jackGrammarParser.StatementsContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#statement.
    def visitStatement(self, ctx:jackGrammarParser.StatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#letStatement.
    def visitLetStatement(self, ctx:jackGrammarParser.LetStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#ifStatement.
    def visitIfStatement(self, ctx:jackGrammarParser.IfStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#whileStatement.
    def visitWhileStatement(self, ctx:jackGrammarParser.WhileStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#doStatement.
    def visitDoStatement(self, ctx:jackGrammarParser.DoStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#returnStatement.
    def visitReturnStatement(self, ctx:jackGrammarParser.ReturnStatementContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#expression.
    def visitExpression(self, ctx:jackGrammarParser.ExpressionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#term.
    def visitTerm(self, ctx:jackGrammarParser.TermContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#subroutineCall.
    def visitSubroutineCall(self, ctx:jackGrammarParser.SubroutineCallContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#expressionList.
    def visitExpressionList(self, ctx:jackGrammarParser.ExpressionListContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#op.
    def visitOp(self, ctx:jackGrammarParser.OpContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#unaryOp.
    def visitUnaryOp(self, ctx:jackGrammarParser.UnaryOpContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by jackGrammarParser#keywordConstant.
    def visitKeywordConstant(self, ctx:jackGrammarParser.KeywordConstantContext):
        return self.visitChildren(ctx)
# ANTLR-generated cleanup: drop the parser symbol from this module's namespace.
del jackGrammarParser
"lmvasquezg@MacBook-Pro-de-Luisa-2.local"
] | lmvasquezg@MacBook-Pro-de-Luisa-2.local |
6afad1eb9a9749a808aa04ff852f4ed7cf4fb72b | 889d13d15084f12e84731f48f50c72169f4ca45f | /public/class03demos/class03p10.py | d49c82eb8a80a9c4ac35087d43a3a802aada5e9c | [] | no_license | puneet-khatod/ml4us | 1bb4a661f3d59d8d0b7ff9e959b2f51324c7a9c9 | 917cdac85086bfc82f03e3db3ba8e7b15f9c407b | refs/heads/master | 2021-05-06T15:59:13.646649 | 2017-12-09T08:03:30 | 2017-12-09T08:03:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | """
class03p10.py
This script should use Pandas to plot prices of GSPC for 2016.
"""
import pandas as pd
import matplotlib.pyplot as plt
csvfile = 'http://spy611.herokuapp.com/csv/allpredictions.csv'
# Download the predictions CSV and sort rows chronologically by `cdate`.
cp_df = pd.read_csv(csvfile).sort_values(['cdate'])
# Boolean mask: cdate strings inside 2016 (lexicographic compare works for ISO dates).
cp2016_sr = (cp_df.cdate > '2016') & (cp_df.cdate < '2017')
cp2016_df = cp_df[['cdate','cp']][cp2016_sr]
# Index by date so the line plot gets dates along the x-axis.
cpdate2016_df = cp2016_df.set_index(['cdate'])
# Goog: In Pandas what is an index?
# Goog: In Pandas what does set_index do?
cpdate2016_df.plot.line(title="GSPC 2016")
plt.show() # This line might be slow (blocks until the window is closed)
'bye'  # no-op expression left as an end-of-script marker
| [
"bikle@bikle.com"
] | bikle@bikle.com |
9d7639d189d421797740d682aac51312abee9e92 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4011/codes/1745_1531.py | f673af6f93026f7831e6b2d8cc72542a9d884f67 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from math import*
# NOTE(review): the original could not run at all — `soma =` had no
# right-hand side (SyntaxError), the loop overwrote the running total
# instead of accumulating it, and the final print used the undefined name
# `serie`.  The per-term formula (negated x**(2 + 2*n) / (2*n)!) is kept
# exactly as written.
x = eval(input("radiano: "))  # HACK: eval on user input — float(input(...)) would be safer
k = int(input("Quantidade de termos da serie: "))
n = 0
soma = 0  # accumulator (was left uninitialized)
while(n < k):
    n = n + 1
    sinal = (x**(2 + 2*n)/factorial(2*n))
    sinal = - sinal
    soma = soma + sinal  # was `soma = sinal + sinal`, which discarded the total
print(round(soma, 10))  # was `serie`, an undefined name
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
f7d6009afe642598dd42b9bec2f9d912ca79f499 | 99b3a729c1240b022b7d58a31dd0d6236ba62243 | /codes/verify_data.py | 9201cf86dfa142d3ff85649d4bdf05a881f4ed07 | [
"Apache-2.0"
] | permissive | deergoose/BasicSR | bdc92a4510bcf68f86392410bad5832722ae86d2 | d8ee9e84a308ad47e7cc97adc4a282f34cbdb956 | refs/heads/master | 2020-07-06T00:02:59.398073 | 2019-04-27T20:22:17 | 2019-04-27T20:22:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import argparse
import matplotlib.pyplot as plt
from data.dstl_dataset.dataset import DstlDataset
import options.options as option
# Parse the required -opt argument (path to the options JSON) and load it.
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, required=True, help='Path to option JSON file.')
opt = option.parse(parser.parse_args().opt, is_train=True)
opt = option.dict_to_nonedict(opt)  # Convert to NoneDict, which returns None for a missing key.
train_set = DstlDataset(opt['datasets']['train'])
# Load the first 100 samples (result unused) — presumably a smoke test of the
# dataset pipeline; any loading error surfaces immediately.  TODO confirm intent.
for i in range(100):
    data = train_set[i]
| [
"zhoufang@ucsd.edu"
] | zhoufang@ucsd.edu |
b1c393d0df4595e8737420b0882fdf0ac70e062d | 3aaa3640b929ac46dc7a4b9fada59be615079576 | /demos/helloworld/helloworld.py | 06eac9290f968b6e70163a72e50faebe22da1aeb | [
"Apache-2.0"
] | permissive | joobih/tornado_redis_yield | f840c44362cc57b0cd4fd813b9875b3a29369381 | 9c2a81a87bbbbca50b57d23029e4b78192bedf17 | refs/heads/master | 2021-05-25T09:14:49.990681 | 2018-04-02T08:53:23 | 2018-04-02T08:53:23 | 126,953,130 | 1 | 1 | Apache-2.0 | 2021-04-20T17:19:10 | 2018-03-27T08:19:46 | Python | UTF-8 | Python | false | false | 1,222 | py | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
# Command-line flag --port (default 8888): TCP port the server listens on.
define("port", default=8888, help="run on the given port", type=int)
class MainHandler(tornado.web.RequestHandler):
    """Handler mapped to "/": answers every GET with a fixed greeting."""

    def get(self):
        self.write("Hello, world")
def main():
    """Parse command-line options and serve the app until interrupted."""
    tornado.options.parse_command_line()
    routes = [(r"/", MainHandler)]
    app = tornado.web.Application(routes)
    server = tornado.httpserver.HTTPServer(app)
    server.listen(options.port)
    tornado.ioloop.IOLoop.current().start()
# Start the server only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
| [
"1006536507@qq.com"
] | 1006536507@qq.com |
d769330af821d7a74f0e733e79db3212a3b06fbd | 85f95f90f0512d7771f4d3a4a7e949182d72f5b4 | /jumble_solve2.py | 37b406f81d9517cd9bb166dfb44754df267f5b84 | [] | no_license | Mattmlm/jumblesolve | b35ca25dcc650265fb2dd089603e79ab10ad1e52 | ba870c42c5249b76d011ca31b154f592d8233c04 | refs/heads/master | 2016-09-05T19:09:53.756483 | 2014-02-18T20:19:28 | 2014-02-18T20:19:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,001 | py | # jumble_solve2.py
import sys
def prefix_tree(filename):
    """Read a word list (one word per line) and index it by first letter.

    Returns a dict mapping each first letter to the list of words that
    start with it, in file order.  Blank lines are skipped (the original
    crashed on them with an IndexError).
    """
    dictionary_tree = {}
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename) as fin:
        for line in fin:
            word = line.strip()
            if not word:
                continue
            # setdefault + append is O(1) per word; the old `bucket + [word]`
            # rebuilt the whole bucket list on every insertion.
            dictionary_tree.setdefault(word[0], []).append(word)
    return dictionary_tree
def word_exists(test_word, dictionary_list):
    """Return True iff *test_word* appears in the first-letter index.

    *dictionary_list* is the dict produced by prefix_tree().  An unknown
    first letter or an empty string simply yields False (the original
    raised KeyError / IndexError for those cases).
    """
    if not test_word:
        return False
    return test_word in dictionary_list.get(test_word[0], [])
def jumble_solve(word_list, dictionary_file='words.txt'):
    """Return the candidates from *word_list* that are real dictionary words.

    The word list is loaded and indexed on every call.  *dictionary_file*
    generalizes the previously hard-coded path; the default preserves the
    original behaviour.
    """
    index = prefix_tree(dictionary_file)
    return [word for word in word_list if word_exists(word, index)]
# Depth-first generator of candidate strings.
#  - word_list         : output accumulator (mutated in place and returned)
#  - word              : prefix built so far
#  - remaining_letters : list of letters still available
def permute_words(word_list, word, remaining_letters):
    # Every distinct non-empty arrangement of the remaining letters is
    # recorded as soon as it is formed, so all prefixes are emitted too.
    # Taking the set of the remaining letters avoids duplicate branches
    # when a letter occurs more than once.
    for letter in set(remaining_letters):
        extended = word + letter
        word_list.append(extended)
        if len(remaining_letters) > 1:
            cut = remaining_letters.index(letter)
            rest = remaining_letters[:cut] + remaining_letters[cut + 1:]
            permute_words(word_list, extended, rest)
    return word_list
# NOTE: Python 2 syntax below (print statements) — this script will not run
# under Python 3 as written.  The jumbled letters come from argv[1].
original_permutation = permute_words([], "", list(sys.argv[1]))
print "Length of original permutation is: " + str(len(original_permutation))
# print original_permutation
print jumble_solve(original_permutation)
"matthew.l.mo@gmail.com"
] | matthew.l.mo@gmail.com |
4970dcdf1544b9e15abe1d6a4a00f1534ace0616 | f9e5613901957409ca535468af7301ddfbf2512e | /lmru.py | 2f0f1eb4021c923e59517e4f26eae4fbe2cac9dd | [] | no_license | OlgaP-1/Metods-sbora-i-obrabotki-bases | 6f3b0d6a283086dc6315328bd93303dca179d29c | a0a4ed4478079880fee3f7dad68ad76ece218e45 | refs/heads/main | 2023-04-01T10:57:42.167990 | 2021-03-30T18:17:06 | 2021-03-30T18:17:06 | 344,433,801 | 0 | 0 | null | 2021-04-08T22:09:39 | 2021-03-04T10:22:35 | Python | UTF-8 | Python | false | false | 2,217 | py | import scrapy
from scrapy.http import HtmlResponse
from leruaMerlen.items import LeruamerlenItem
from scrapy.loader import ItemLoader
class LmruSpider(scrapy.Spider):
    """Scrapy spider for leroymerlin.ru search results.

    Crawls every result page of a search query and scrapes each product's
    photos, name, prices, region link, and feature key/value lists.
    (Comments translated from the original Russian.)
    """
    name = 'lmru'
    allowed_domains = ['leroymerlin.ru']
    def __init__(self, search):
        super().__init__()
        self.start_urls = [f'https://leroymerlin.ru/search/?q={search}']
    def parse(self, response: HtmlResponse):
        links = response.xpath('//a[contains(@class, "plp-item__info__title")]/@href').extract()
        # Find the link to the next results page
        next_page = response.xpath('//a[@class= "paginator-button next-paginator-button"]/@href').extract_first()
        for link in links:
            yield response.follow(link, callback=self.parse_item)
        # Follow the next page and reuse parse as the callback for it
        yield response.follow(next_page, callback=self.parse)
    def parse_item(self, response: HtmlResponse): # process each product page
        loader = ItemLoader(item=LeruamerlenItem(), response=response)
        loader.add_xpath('photos', '//img[contains(@alt, "product image")]/@src')
        loader.add_xpath('name', '//h1[@itemprop="name"]/text()')
        loader.add_xpath('price', f'//uc-pdp-price-view[contains(@class, "primary-price")]//span[@slot="price"]/text() | .//uc-pdp-price-view[contains(@class, "primary-price")]//span[@slot="fract"]/text()')
        loader.add_xpath('price_square', '//uc-pdp-price-view[contains(@class, "second-price")]//span[@slot="price"]/text()')
        loader.add_xpath('href', '//uc-regions-overlay-item/a[contains(@class, "region-link highlighted new")]/@href')
        loader.add_xpath('features_keys', '//dt[contains(@class, "def-list__term")]/text()')
        loader.add_xpath('features', '//dd[contains(@class, "def-list__definition")]/text()')
        # The commented-out line below did not parse, for an unclear reason.
        # loader.add_xpath('dic', '//div[contains(@class, "def-list__group")]/text()')
        yield loader.load_item()
| [
"noreply@github.com"
] | noreply@github.com |
362de125dfff44ad410d7bef2f1dd56984cbfb60 | bdcfcb7924633a11ef1cfacc283ee2116ebf87e3 | /neuralTPPs/tpp/models/base/process.py | a10e0aab4f111779348bfcc758b6e2ce0e2e3eb2 | [
"Apache-2.0"
] | permissive | lithces/tpps | d36c98f3554123a82524f96366fdb419606c9a85 | 0bdf84f37c91023ffbe06146efa1889d9a1e9f9b | refs/heads/master | 2023-08-23T13:24:02.128993 | 2021-11-05T22:35:48 | 2021-11-05T22:35:48 | 425,101,008 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | import abc
import torch as th
import torch.nn as nn
from typing import Dict, Optional, Tuple
from tpp.utils.events import Events
class Process(nn.Module):
    """Abstract base for parametric point-process models: subclasses must
    implement intensity() and neg_log_likelihood()."""

    def __init__(self, name: str, marks: Optional[int] = 1, **kwargs):
        """A parametric process.
        Args:
            name: The name of the process.
            marks: The distinct number of marks (classes) for the process.
                Defaults to 1.
        """
        super(Process, self).__init__()
        # Extra **kwargs are accepted for subclass convenience but ignored here.
        self.name = name
        self.marks = marks

    @abc.abstractmethod
    def intensity(
            self, query: th.Tensor, events: Events
    ) -> Tuple[th.Tensor, th.Tensor]:
        """Compute the intensities at query times given events.
        Args:
            query: [B,T] Sequences of query times to evaluate the intensity
                function.
            events: [B,L] Times and labels of events.
        Returns:
            intensities: [B,T,M] The intensities for each query time for each
                mark (class).
            intensity_mask: [B,T,M] Which intensities are valid for further
                computation based on e.g. sufficient history available.
        """
        pass

    @abc.abstractmethod
    def neg_log_likelihood(
            self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the negative log likelihood of events.
        Args:
            events: [B,L] Times and labels of events.
        Returns:
            nll: [B] The negative log likelihoods for each sequence.
            nll_mask: [B] Which neg_log_likelihoods are valid for further
                computation based on e.g. at least one element in sequence has
                a contribution.
            artifacts: Other useful quantities.
        """
        pass
| [
"lithium7456@gmail.com"
] | lithium7456@gmail.com |
c6c285a03178805fd2966399625ea4b885c8db09 | 96b0fdb57d99add48823cd80dfb822af0df18e22 | /bin/wheel | 166f9b141ab4b9f56da78f02d68d2e79b6dd5d69 | [] | no_license | sebfio/mte546project | 39e368dd0a1aff20f8f480b242bebba9fe24ae0c | 98027eec4382c2449d0a7cfd4fd43354457f3d76 | refs/heads/master | 2023-02-03T15:25:03.388192 | 2020-01-08T03:08:35 | 2020-01-08T03:08:35 | 176,009,525 | 0 | 0 | null | 2023-02-02T04:54:08 | 2019-03-16T18:29:41 | Python | UTF-8 | Python | false | false | 224 | #!/home/sxf/mte546/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"sebf465@gmail.com"
] | sebf465@gmail.com | |
c03a4aa677a7ee28e15c3cdb81e2a3b9d4b0830f | bfc551b280079889255383ebf0fede2a5a724198 | /bin/django-admin.py | b6c7819ebd994a7e6f06b38970ebe032a73f5e76 | [] | no_license | Wolf4091/AdvertisingMat-site | ebf1b602fdbf6bfefdb2e1a82b9d8642b1ae2d1c | c26798dcb24309a13f280bbdcdddcba8198b00b4 | refs/heads/master | 2020-03-18T18:33:22.459061 | 2018-06-11T06:52:21 | 2018-06-11T06:52:21 | 135,100,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | #!/home/ian/pythonprojs/selfsite/MySite/bin/python2
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"iclark@idevops.io"
] | iclark@idevops.io |
f5dd2bb68d941f22a8ece354d5ebe4a7ff628fca | 736250d9d14552c5fa0aca25b25d9c8a28fcd1a0 | /mtmpro/mtmapp/migrations/0001_initial.py | 42368fcde0eff7c119ef4b9236f3139dcdb96da7 | [] | no_license | maheswatapradhan/feedback | 57f052a2082902cb8a72b474e0b863b7a00d1c9c | 31c7dcb113a38e29b3a56481fcb9ae2fce7d61a2 | refs/heads/master | 2020-09-15T23:42:32.041306 | 2019-11-23T12:54:25 | 2019-11-23T12:54:25 | 223,585,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-09-16 11:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cno', models.IntegerField()),
('cname', models.CharField(max_length=100)),
('fee', models.IntegerField()),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sno', models.IntegerField()),
('sname', models.CharField(max_length=100)),
('location', models.CharField(max_length=100)),
('marks', models.IntegerField()),
],
),
migrations.AddField(
model_name='course',
name='student',
field=models.ManyToManyField(to='mtmapp.Student'),
),
]
| [
"test@test.com"
] | test@test.com |
a4f838f0cb967caac7da1085f97a7e03189b5fa5 | 4fbe62e09f468b6ad413efd5f121d6b26d367d60 | /controller/helper.py | 5caff02d1f68bae012c8ffd8bf25bad0a943fd51 | [] | no_license | ztao/dmgae | d331da0f30b6e9ca36581b8349e9c98bdf02b06d | 6629bf2eb0fa2bbcd473031b32f27733995d8295 | refs/heads/master | 2021-01-24T06:39:10.283052 | 2013-08-12T19:13:06 | 2013-08-12T19:13:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,335 | py | import json
from ast import literal_eval
from model.event import EventModel, Activity, Relation, Place
def json2model(event_obj):
print event_obj
event_obj = json.loads(event_obj.replace('\r\n', '\\r\\n'))
id = int(event_obj["ID"])
event = EventModel.query(EventModel.eid == id).get()
if not event:
event = EventModel()
event.eid = int(event_obj["ID"])
event.name = event_obj["Name"]
event.brief = event_obj["Brief"]
event.start_date = event_obj["Start Date"]
event.duration = event_obj["Duration"]
event.activity_list = []
for a in event_obj["Activity List"]:
activity = Activity()
activity.aid = int(a["ID"])
activity.name = a["Name"]
activity.start_date = a["Start Date"]
activity.duration = a["Duration"]
activity.place = Place();
activity.place.name = a["Place"]["Name"]
activity.place.location = a["Place"]["Location"]
activity.tips = a["Tips"]
activity.relation = Relation()
activity.relation.previous = a["previous"]
activity.relation.next = a["next"]
activity.relation.xor = a["xor"]
event.activity_list.append(activity)
event.before_you_go = str(event_obj["Before You Go"])
event.general_tips = event_obj["General Tips"]
return event
def model2json(event_model):
event = {}
event["ID"] = event_model.eid
event["Name"] = event_model.name
event["Brief"] = event_model.brief
event["Start Date"] = event_model.start_date
event["Duration"] = event_model.duration
event["Activity List"] = []
a1 = []
for a2 in event_model.activity_list:
a1 = {
"ID" : a2.aid,
"Name" : a2.name,
"Start Date" : a2.start_date,
"Duration" : a2.duration,
"Place" : {
"Name": a2.place.name,
"Location": a2.place.location,
"Weather": a2.place.weather
},
"Tips": a2.tips,
"Relation": {
"previous": a2.relation.previous,
"next": a2.relation.next,
"xor": a2.relation.xor
}
}
event["Activity List"].append(a1)
event["Before You Go"] = literal_eval(event_model.before_you_go)
event["General Tips"] = event_model.general_tips
return json.dumps(event)
| [
"ziwang.tao@gmail.com"
] | ziwang.tao@gmail.com |
edf06fc59a34181ad65492c202910cb9eb9876f2 | bf0f5fd1b11e988767c9493a7d3b2e97da457ca2 | /uniq_samples/code_samples/CommandRunner/src/cmd_runner.py | b96c9a2231c0201c424126c59ce6aa8bedffd834 | [
"Apache-2.0"
] | permissive | jorschul/dnac-samples-aradford | 6fe818c93c5de49fa24b4bdaa4508d47802333e2 | 8d6ab78c1b00904beba8a5f433b92df67a61f150 | refs/heads/master | 2021-09-15T19:06:19.311079 | 2018-06-08T20:26:43 | 2018-06-08T20:26:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,135 | py | #!/usr/bin/env python
import ast
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from login import login
import json
import jtextfsm as textfsm
from argparse import ArgumentParser
def run_command(dnac, devs, cmds):
    """Run CLI commands on devices through the DNA-C command runner.

    Args:
        dnac: authenticated DNA-C API client.
        devs: list of device UUIDs to run the commands on.
        cmds: list of CLI command strings; defaults to ["show clock"] when
            no commands are supplied.

    Returns:
        The raw text of the downloaded result file, or None when the task
        fails or times out.
    """
    # Bug fix: the original tested `cmds is [None]`, which is always False
    # (identity comparison against a fresh list literal), so the default
    # command was never applied.
    if cmds is None or cmds == [None]:
        cmds = ["show clock"]
    print(cmds)
    dto = {
        "name": "show ver",
        "deviceUuids": devs,
        "commands": cmds
    }
    task = dnac.networkdevicepollercli.submitCommands(commandRunnerDto=dto)
    if task:
        task_response = dnac.task_util.wait_for_task_complete(task, timeout=20)
        if task_response:
            # only needed until we fix the output of this progress field to be json
            fileId = ast.literal_eval(task_response.progress)['fileId']
            file = dnac.file.downLoadFile(fileId=fileId)
            return file.text
def deviceip_to_id(dnac, device_ip):
    """Return the DNA-C device UUID for the device with this management IP."""
    result = dnac.networkdevice.getNetworkDeviceByIp(ipAddress=device_ip)
    return result.response.id
def deviceid_to_ip(dnac, deviceId):
    """Return the management IP address of the device with the given UUID."""
    device = dnac.networkdevice.getNetworkDeviceById(id=deviceId)
    return device.response.managementIpAddress
def deviceid_to_name(dnac, deviceId):
    """Return the hostname of the device with the given UUID."""
    device = dnac.networkdevice.getNetworkDeviceById(id=deviceId)
    return device.response.hostname
def tag_to_ip(dnac, tag):
    """Return management IPs of all physical-topology nodes carrying *tag*.

    Returns an empty list when *tag* is None.
    """
    if tag is None:
        return []
    nodes = dnac.topology.getPhysicalTopology().response.nodes
    ips = []
    for node in nodes:
        if node.tags and tag in node.tags:
            ips.append(node.ip)
    return ips
def format_fsm():
    # Stub: never implemented; TextFSM formatting is handled inline in
    # format_response() below.
    pass
def format_response(dnac, res_json, human, fsm, table):
    """Print command-runner results in one of three formats.

    Args:
        dnac: authenticated DNA-C client, used to resolve device IPs/names.
        res_json: parsed command-runner response (list of per-device dicts).
        human: if True, print a readable per-device report.
        fsm: path to a TextFSM template, or falsy to dump raw JSON.
        table: if True (together with fsm), print CSV rows with a header.
    """
    if human:
        for response in res_json:
            success = response['commandResponses']['SUCCESS']
            failure = response['commandResponses']['FAILURE']
            devuuid = response["deviceUuid"]
            for key in success.keys():
                print('{ip}: {command}:\n{success}\n{failure}'.format(ip=deviceid_to_ip(dnac, devuuid),
                                                                      command=key, success=success[key],
                                                                      failure=failure))
    elif fsm:
        # Bug fix: close the template file instead of leaking the handle.
        with open(fsm) as template:
            re_table = textfsm.TextFSM(template)
        table_keys = re_table.header
        if table:
            print('IP,Name,Command,' + ','.join(table_keys))
        for response in res_json:
            re_table.Reset()
            success = response['commandResponses']['SUCCESS']
            failure = response['commandResponses']['FAILURE']
            devuuid = response["deviceUuid"]
            for key in success.keys():
                if success:
                    raw = re_table.ParseText(success[key])
                    # ParseText returns a list of rows; one command may yield
                    # a table, i.e. multiple records.
                    base = '{ip},{name},{command},'.format(ip=deviceid_to_ip(dnac, devuuid),
                                                          name=deviceid_to_name(dnac, devuuid),
                                                          command=key)
                    if table:
                        # join all raw fields together comma separated, with
                        # the base prefix prepended to every line
                        formatted = "\n".join(list(map(lambda x: base.__add__(x), (map(lambda x: ','.join(x), raw)))))
                    else:
                        # Bug fix: the original concatenated a str with a list
                        # here, raising TypeError. Emit one "key:value" CSV
                        # line per parsed record instead.
                        formatted = "\n".join(base + ",".join(x + ":" + y for (x, y) in zip(table_keys, record)) for record in raw)
                    print(formatted)
                if failure:
                    print('{ip},{command},FAILURE {failure}'.
                          format(ip=deviceid_to_ip(dnac, devuuid),
                                 command=key,
                                 failure=failure))
    else:
        print(json.dumps(res_json, indent=2))
if __name__ == "__main__":
parser = ArgumentParser(description='Select options.')
parser.add_argument('--commands', type=str,
help="commands to run")
parser.add_argument('--intregexp', type=str,
help="interface regular expression (requires a {{INTF}} parameter in the command")
parser.add_argument('--tag', type=str,
help="tag for devices to choose")
parser.add_argument('--ip', type=str,
help="ip address for devices to choose")
parser.add_argument('--human', action='store_true',
help="human output or machine")
parser.add_argument('--fsm', action='store_true',
help="format output using textfsm template")
parser.add_argument('--table', action='store_true', default=False,
help="table format output using textfsm template")
parser.add_argument('-v', action='store_true',
help="verbose")
args = parser.parse_args()
dnac = login()
print ("tag:", args.tag)
ips = None
if args.tag:
ips = tag_to_ip(dnac, args.tag)
elif args.ip:
ips = [args.ip]
if ips:
ids = [deviceip_to_id(dnac, ip) for ip in ips]
else:
print("no ips or tags for network devices")
validCmds = dnac.networkdevicepollercli.getLegitCliKeywords()
print("ValidCommands: {0}".format(", ".join(validCmds.response)))
exit(1)
#print ("commands:", args.commands)
try:
cmds = json.loads(args.commands)
except ValueError:
cmds = [args.commands]
fsm = args.fsm
table = args.table
if args.fsm:
file="fsm/" + '_'.join(args.commands.split()) + '.textfsm'
try:
f = open(file)
f.close()
except KeyError:
print("no fsm file", file)
exit(1)
fsm = file
res = run_command(dnac, devs=ids, cmds=cmds)
res_json = json.loads(res)
format_response(dnac, res_json, args.human, fsm, table)
# max of 5 commands per request | [
"aradford@cisco.com"
] | aradford@cisco.com |
e7ac3e943d33f282eac4d0e7a3b4aac178f27edb | f2eb2e5bbb2b7c8d09094ea92105fc119956ab2d | /category/migrations/0001_initial.py | fb781b98ff210712a4765ed6244264712ec62e47 | [] | no_license | MichangaVictor/greatkart | ada27fcec3e44efe2322b7b487138f8045e5eeae | 3d35a884fe25fd67382827d174fa2477a382225d | refs/heads/main | 2023-09-01T07:08:19.158330 | 2021-09-25T11:14:21 | 2021-09-25T11:14:21 | 407,810,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | # Generated by Django 3.1 on 2021-09-17 10:46
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_name', models.CharField(max_length=50, unique=True)),
('slug', models.CharField(max_length=100, unique=True)),
('description', models.TextField(blank=True, max_length=255)),
('cat_image', models.ImageField(blank=True, upload_to='photos/categories')),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
},
),
]
| [
"onyango.vic@yandex.com"
] | onyango.vic@yandex.com |
855e7d3b3e1a062a0a4d29cccd95979c6e39f28d | cd85cac1855e533f47d91735b072a640e4d032c9 | /blog/migrations/0002_comment.py | d111b7f248fb9b54d2d06623bede6d5b676d8ef4 | [] | no_license | Rem1219/Sunduk | 154fc52d35f1921a2607a08bacc4eb24064d44bd | 4f1bd9224a53432bdc7036a12819e571cd05d0cb | refs/heads/master | 2021-01-17T23:21:25.107415 | 2016-06-24T18:17:57 | 2016-06-24T18:17:57 | 61,186,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-24 16:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
),
]
| [
"rem12191@gmail.com"
] | rem12191@gmail.com |
276977c5c0f0c8513d7f9b377f884740796791cd | 34876b55af2c25629a7617a329f209de6c9f274f | /env/lib/python3.6/functools.py | 8559f5b0239570c6be959bba2b19de7ea3dd3b89 | [] | no_license | Fastwriter/django-rest-framework-tutorial | 350a239b85e31ebd7e3f0946e7841d1aa04fe165 | 72429f51d645d6c2357ae8107bb6021ef2cb5a9e | refs/heads/master | 2020-04-11T13:08:53.681228 | 2018-12-14T15:40:45 | 2018-12-14T15:40:45 | 161,805,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | /Users/demeuovdaulet/anaconda3/lib/python3.6/functools.py | [
"demeuov.daulet.nis.ura@gmail.com"
] | demeuov.daulet.nis.ura@gmail.com |
2d493828a291e0d20a0b71cb7adc4a216c1109a8 | a83d577557b0d1cb348560879468d0563ababa11 | /ecommerce/forms.py | 664bf5947e9b5a53e83e60dd04b854e800bb70c3 | [] | no_license | MaazAr/Mark-Django | 7856edf2fb8cefd4158daa9a3376823101eb3731 | e8ae75a05a4e0975e704de7412099a130602f5fb | refs/heads/master | 2023-01-05T18:17:43.804007 | 2020-11-02T13:40:11 | 2020-11-02T13:40:11 | 309,380,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from django import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class ContactForm(forms.Form):
    """Site contact form: full name, e-mail address and a free-text message."""

    # Bootstrap-styled widgets; the placeholders act as inline labels.
    fullname = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control", "placeholder": "your full name"}))
    email = forms.EmailField(widget=forms.EmailInput(attrs={"class": "form-control", "placeholder": "your email"}))
    content = forms.CharField(widget=forms.Textarea(attrs={"class": "form-control", "placeholder": "message"}))
| [
"maazabdul@ymail.com"
] | maazabdul@ymail.com |
e92bb7009b48dbf53be81f216d049bab6787cdce | 5d61565651b7ba5fa8fade3313a5e82fca8b6686 | /login/migrations/0003_auto_20190709_2213.py | 58c72a12c002fd6586fd9fbdb94b2ed1aaacc6c2 | [] | no_license | lonelyxmas/ISMS | d597b00072bfa77907875f575b866fbb1fb53295 | 08c5e2f3518fc639cf1a1f2869f4b2f3ae58e306 | refs/heads/master | 2023-08-14T12:02:59.001215 | 2021-03-22T03:34:58 | 2021-03-22T03:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # Generated by Django 2.1.4 on 2019-07-09 14:13
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Make User.FID a UUID primary key and update User.FType choices."""

    dependencies = [
        ('login', '0002_auto_20190704_0826'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='FID',
            field=models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='user',
            name='FType',
            field=models.IntegerField(choices=[(0, '企业账户'), (1, '合作伙伴'), (2, '管理员')], default=0, verbose_name='用户类型'),
        ),
    ]
| [
"11325818@qq.com"
] | 11325818@qq.com |
db9503f8d4917677b10f97a48c4f912d05a9290a | acc244c97a943d8e2074339afa1bff1274ae4cfc | /CGATPipelines/PipelineMedip.py | 3f12a921f960aaedb163d725a83b325930f8e7fb | [] | no_license | eromasko/cgat | 00114f4c95b439ba6595ddf2092d1a3307347401 | d82d197f3913b8d65b656c0b205ca48854fdb2a6 | refs/heads/master | 2021-01-17T09:37:17.168278 | 2015-02-20T09:03:31 | 2015-02-20T09:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,824 | py | '''
PipelineMedip.py - tasks associated with MedipSeq analysis
==========================================================
'''
import re
import os
import collections
import sqlite3
import CGAT.Experiment as E
import CGAT.Pipeline as P
import CGAT.Database as Database
import CGAT.IOTools as IOTools
from rpy2.robjects import r as R
import rpy2.robjects as ro
PARAMS = {}
def buildDMRStats(tables, method, outfile):
'''build dmr summary statistics.
Creates some diagnostic plots in
<exportdir>/<method> directory.
Tables should be labeled <tileset>_<design>_<method>.
'''
dbhandle = sqlite3.connect(PARAMS["database"])
def togeneset(tablename):
return re.match("([^_]+)_", tablename).groups()[0]
keys_status = "OK", "NOTEST", "FAIL", "NOCALL"
outf = IOTools.openFile(outfile, "w")
outf.write("\t".join(("tileset", "design", "track1", "track2", "tested",
"\t".join(["status_%s" % x for x in keys_status]),
"significant",
"up", "down",
"twofold",
"twofold_up", "twofold_down",
)) + "\n")
all_tables = set(Database.getTables(dbhandle))
outdir = os.path.join(PARAMS["exportdir"], "diff_methylation")
for tablename in tables:
prefix = P.snip(tablename, "_%s" % method)
tileset, design = prefix.split("_")
def toDict(vals, l=2):
return collections.defaultdict(int, [(tuple(x[:l]), x[l]) for x in vals])
E.info("collecting data from %s" % tablename)
tested = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
GROUP BY treatment_name,control_name""" % locals() ).fetchall() )
status = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, status, COUNT(*) FROM %(tablename)s
GROUP BY treatment_name,control_name,status""" % locals() ).fetchall(), 3 )
signif = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE significant
GROUP BY treatment_name,control_name""" % locals() ).fetchall() )
fold2 = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE (l2fold >= 1 or l2fold <= -1) AND significant
GROUP BY treatment_name,control_name,significant""" % locals() ).fetchall() )
up = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE l2fold > 0 AND significant
GROUP BY treatment_name,control_name,significant""" % locals() ).fetchall() )
down = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE l2fold < 0 AND significant
GROUP BY treatment_name,control_name,significant""" % locals() ).fetchall() )
fold2up = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE l2fold > 1 AND significant
GROUP BY treatment_name,control_name,significant""" % locals() ).fetchall() )
fold2down = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE l2fold < -1 AND significant
GROUP BY treatment_name,control_name,significant""" % locals() ).fetchall() )
groups = tested.keys()
for treatment_name, control_name in groups:
k = (treatment_name, control_name)
outf.write("\t".join(map(str, (
tileset,
design,
treatment_name,
control_name,
tested[k],
"\t".join([str(status[(treatment_name, control_name, x)])
for x in keys_status]),
signif[(k)],
up[k], down[k],
fold2[k],
fold2up[k], fold2down[k]))) + "\n")
###########################################
###########################################
###########################################
# plot length versus P-Value
data = Database.executewait(dbhandle,
'''SELECT end - start, pvalue
FROM %(tablename)s
WHERE significant''' % locals() ).fetchall()
# require at least 10 datapoints - otherwise smooth scatter fails
if len(data) > 10:
data = zip(*data)
pngfile = "%(outdir)s/%(tileset)s_%(design)s_%(method)s_pvalue_vs_length.png" % locals()
R.png(pngfile)
R.smoothScatter(R.log10(ro.FloatVector(data[0])),
R.log10(ro.FloatVector(data[1])),
xlab='log10( length )',
ylab='log10( pvalue )',
log="x", pch=20, cex=.1)
R['dev.off']()
outf.close()
| [
"andreas.heger@gmail.com"
] | andreas.heger@gmail.com |
c1e309ba4de5e590737e4abc08092bc2eecb0f47 | 569906ef0204e7de0947b3a17be20145da6c325a | /exampleCode/heatConduction3DUnsteady.py | f479a9750c26843f3f6734ebf56c440ed1fce9d6 | [] | no_license | amikkonen/NumericalTechniquesForProcessModelling | 1a28678f75ac4d7b7c8419e963f39fffbe721e0b | d9a31f09f1e01f8be5dddf3b8e3ebc2052c5a055 | refs/heads/master | 2023-02-08T16:50:37.693290 | 2023-02-05T16:26:06 | 2023-02-05T16:26:06 | 116,688,697 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,514 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A course example of unsteady 3D heat conduction.
Constant propersties, contant temperature boundary, zero gradient (insulated)
boundary, transient.
NOTE: USE OF SPARSE MATRIXES IN MatrixA
Created on Sat Jan 13 13:10:11 2018
@author: Antti Mikkonen, a.mikkonen@iki.fi
"""
import scipy as sp
from scipy import linalg
from matplotlib import pyplot as plt
import scipy.sparse as sparse
from scipy.sparse import linalg as splinalg
import openpyxl
from scipy import interpolate
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import time
##############################################################################
#def write_excell(A,b,T=None):
#
# wb = openpyxl.Workbook()
# dest_filename = 'AbT.xlsx'
#
# wsA = wb.active
# wsA.title = "A"
#
# for row in range(len(A)):
# wsA.append(list(A[row]))
#
# wsb = wb.create_sheet(title="b")
# wsb.append(list(b))
#
## wsT = wb.create_sheet(title="T")
## for row in range(len(T)):
## wsT.append(list(T[row]))
#
# wb.save(filename = dest_filename)
class MatrixA(object):
    """Coefficient-matrix builder for the linear system A*T = b.

    With full=True entries accumulate in a dense array; otherwise
    (row, col, value) triplets are buffered and assembled into a sparse
    CSR matrix in finalize().
    """

    def __init__(self, n_tot, full):
        # n_tot: total number of unknowns (matrix is n_tot x n_tot)
        # full: True for a dense matrix, False for sparse triplet assembly
        self.full = full
        if self.full:
            self.A = sp.zeros((n_tot,n_tot), dtype=sp.float_)
        else:
            # COO-style triplet buffers: row indices, column indices, values
            self.l = []
            self.m = []
            self.val = []

    def add(self, l, m, val):
        # Accumulate val into entry (l, m); duplicate triplets are summed
        # when the sparse matrix is assembled.
        if self.full:
            self.A[l,m] += val
        else:
            self.l.append(l)
            self.m.append(m)
            self.val.append(val)

    def finalize(self):
        # Return the assembled matrix: dense ndarray or CSR sparse matrix.
        if self.full:
            return self.A
        else:
            return sparse.csr_matrix(sparse.coo_matrix(
                    (self.val, [self.l, self.m])
                    ))
def solver(L, T_B, T, ks, rhoc, n, dt, t_max, constant_T_batch_coords,
full=False):
nx = n[0]; ny = n[1]; nz = n[2]
n_tot = n.prod()
# Cell sizes
dx = L / n
# Coords of cell centers
xyz = sp.zeros((3,nx,ny,nz), dtype=sp.float_)
x = (sp.ones(nx)*dx[0]).cumsum()-dx[0]/2
y = (sp.ones(ny)*dx[1]).cumsum()-dx[1]/2
z = (sp.ones(nz)*dx[2]).cumsum()-dx[2]/2
for kz in range(nz):
for ky in range(ny):
for kx in range(nx):
xyz[0][kx,ky,kz] = x[kx]
xyz[1][kx,ky,kz] = y[ky]
xyz[2][kx,ky,kz] = z[kz]
# Face area
Ax = dx[1]*dx[2]
Ay = dx[0]*dx[2]
Az = dx[0]*dx[1]
# Volume
dV = dx.prod()
# Current time (s)
t = 0
# Number of steps
steps = int(sp.ceil(t_max/dt))
# Coefficients
aW = ks*Ax/dx[0]
aE = ks*Ax/dx[0]
aS = ks*Ay/dx[1]
aN = ks*Ay/dx[1]
aB = ks*Az/dx[2]
aT = ks*Az/dx[2]
aP0 = rhoc*dV/dt
# Source vector
bConstant = sp.zeros(n_tot, dtype=sp.float_)
# Coefficient matrix
A = MatrixA(n_tot, full)
# Temperatures
Ts = sp.zeros((steps+1,n_tot), dtype=sp.float_)
# Initial tempereture
Ts[0,:] = T
#################################################
# Boundary indexes
#################################################
# x boundaries
xMin = []
for k in range(n_tot):
if k % nx == 0:
xMin.append(k)
xMin = sp.array(xMin, dtype=sp.int_)
# xMax = []
# for k in range(n_tot):
# if (k + 1) % nx == 0:
# xMax.append(k)
# xMax = sp.array(xMax, dtype=sp.int_)
# y boundaries
yMin = []
for k in nx*ny*sp.arange(nz):
for kx in range(nx):
yMin.append(k+kx)
yMin = sp.array(yMin, dtype=sp.int_)
yMax = []
for k in nx*ny*sp.arange(nz):
for kx in range(nx):
yMax.append(k+kx+nx*ny-nx)
yMax = sp.array(yMax, dtype=sp.int_)
# z boundaries
# zMin = sp.arange(nx*ny, dtype=sp.int_)
# zMax = sp.arange((nz-1)*nx*ny, (nz)*nx*ny, dtype=sp.int_)
# yzMin
# yzMin = sp.intersect1d(yMin,zMin)
# Build inner diffusion
for k in range(n_tot):
# Add aW to matrix A
if k % n[0] != 0:
A.add(k,k, aW)
A.add(k,k-1,-aW)
# Add aE to matrix A
if (k + 1) % n[0] != 0:
A.add(k,k, aE)
A.add(k,k+1,-aE)
# Add aS to matrix A
if k not in yMin:
A.add(k,k, aS)
A.add(k,k-nx,-aS)
# Add aN to matrix A
if k not in yMax:
A.add(k,k, aN)
A.add(k,k+nx,-aN)
# Add aB to matrix A
if k >= nx*ny:
A.add(k,k, aB)
A.add(k,k-nx*ny,-aB)
# Add aT to matrix A
if k < (nz-1)*nx*ny:
A.add(k,k, aT)
A.add(k,k+nx*ny,-aT)
########################################
# Add time coefficient to diagonal
########################################
for k in range(n_tot):
A.add(k,k,aP0)
########################################
# Constant T boundary
########################################
cnY = constant_T_batch_coords[0] / dx[1]
cnZ = constant_T_batch_coords[1] / dx[2]
# z,y
xMinMap = xMin.reshape((nz,ny))
# Full faces
cnYi = int(cnY)
cnZi = int(cnZ)
for ky in range(cnYi):
for kz in range(cnZi):
k = xMinMap[kz,ky]
A.add(k,k,2*aW)
bConstant[k] += 2*aW*T_B
if cnYi < ny:
for kz in range(cnZi):
k = xMinMap[kz,cnYi]
A.add(k,k,2*aW*cnY%1)
bConstant[k] += 2*aW*T_B*cnY%1
if cnZi < nz:
for ky in range(cnYi):
k = xMinMap[cnZi,ky]
A.add(k,k,2*aW*cnZ%1)
bConstant[k] += 2*aW*T_B*cnZ%1
if cnYi < ny and cnZi < nz:
k = xMinMap[cnZi,cnYi]
A.add(k,k,2*aW*(cnZ%1)*(cnY%1))
bConstant[k] += 2*aW*T_B*(cnZ%1)*(cnY%1)
########################################
# Solution
########################################
A_final = A.finalize()
for step in range(1,steps+1):
b = bConstant + aP0*T
if full:
T = sp.linalg.solve(A_final,b)
else:
T = splinalg.spsolve(A_final, b)
Ts[step] = T
t += dt
########################################
# Post process
########################################
T3d = sp.zeros((steps+1,nx,ny,nz), dtype=sp.float_)
for step in range(1,steps+1):
k = 0
for kz in range(nz):
for ky in range(ny):
for kx in range(nx):
T3d[step,kx,ky,kz] = Ts[step][k]
k += 1
return T3d, dx, xyz
##############################################################################
if __name__ == "__main__":
def verify_with_1d():
# Number of control volumes
n = sp.array([10, 1, 1])
# Lenght (m)
L = sp.array([0.3, 0.5, 0.5])
constant_T_batch_coords = L[1:]
# Boundary temperatures (C)
T_B = 0
# Initial temperatures (C)
T_0 = 100
# Thermal conductivity (W/mK)
ks = 10
# Product of density and heat capasity (J/m3K)
rhoc = 10e6
# Time step (s)
dt = 1
# Stop time (s)
t_max = 10
Ts3d, dx3d, xyz = solver(L, T_B, T_0, ks, rhoc, n, dt,
t_max,constant_T_batch_coords,full=False)
import heatConduction1DUnsteady
Ts1d, dx1d = heatConduction1DUnsteady.solver(L[0], T_B, T_0, ks, rhoc,
n[0], dt, t_max)
T3d = Ts3d[-1,:,0,0]
T1d = Ts1d[-1][::-1]
print(T3d)
print(T1d)
assert(all(sp.isclose(T3d,T1d)))
print("OK")
###########################################################################
def main():
# Lenght (m)
L = sp.array([0.3, 0.4, 0.2])
dx = 0.01
dy = 0.05
dz = 0.05
# Number of control volumes
n = sp.array([int(sp.ceil(L[0]/dx)), int(sp.ceil(L[1]/dy)),
int(sp.ceil(L[2]/dz))])
print("n", n)
print("n tot", n.prod())
constant_T_batch_coords = [1e-2,1e-2]
# Boundary temperatures (C)
T_B = 100
# Initial temperatures (C)
T_0 = 0
# Thermal conductivity (W/mK)
ks = 10
# Product of density and heat capasity (J/m3K)
rhoc = 10e6
# Time step (s)
dt = 60*60
# Stop time (s)
t_max = 7*24*60*60
# Plot at times (s)
t_plot = sp.linspace(0,t_max, 15)#[0,40,80,t_max]
Ts, dx, xyz = solver(L, T_B, T_0, ks, rhoc, n, dt,
t_max,constant_T_batch_coords)
plt.figure(0)
x = sp.linspace(dx[0]/2,L[0]-dx[0]/2,n[0])
for t in t_plot:
step = int(t/dt)
# T = Ts[step,coords["yzMin"]]
T = Ts[step,:,0,0]
plt.plot(x*1e3, T, "d")
# print(str(t).ljust(4), T)
plt.xlim(0,L[0]*1e3)
plt.xlabel("x (mm)")
plt.ylabel("T (C)")
# Plot first and last cell temperature
plt.figure(1)
t = sp.arange(0,t_max+1,dt)
plt.plot(t, Ts[:,0,0,0], label="first")
plt.plot(t, Ts[:,-1,0,0], label="last")
plt.legend()
plt.xlim(0,t_max)
plt.xlabel("t (s)")
plt.ylabel("T (C)")
###########################################################################
def timeit():
L = sp.array([0.3, 0.4, 0.2])
constant_T_batch_coords = [1e-2,1e-2]
# Boundary temperatures (C)
T_B = 100
# Initial temperatures (C)
T_0 = 0
# Thermal conductivity (W/mK)
ks = 10
# Product of density and heat capasity (J/m3K)
rhoc = 10e6
# Time step (s)
dt = 60*60
# Stop time (s)
t_max = 7*24*60*60
full_t = []
sparse_t = []
divs = sp.arange(1,10+1)
ns = sp.zeros(len(divs))
for k in divs:
print(k)
dx = 0.3 / k
dy = 0.4 / k
dz = 0.2 / k
# Number of control volumes
n = sp.array([int(sp.ceil(L[0]/dx)), int(sp.ceil(L[1]/dy)),
int(sp.ceil(L[2]/dz))])
ns[k-1] = n.prod()
start = time.time()
Ts, dx, xyz = solver(L, T_B, T_0, ks, rhoc, n, dt,
t_max,constant_T_batch_coords, full=False)
sparse_t.append(time.time()-start)
start = time.time()
Ts, dx, xyz = solver(L, T_B, T_0, ks, rhoc, n, dt,
t_max,constant_T_batch_coords, full=True)
full_t.append(time.time()-start)
full_t = sp.array(full_t)
sparse_t = sp.array(sparse_t)
print(full_t/sparse_t)
plt.plot(ns,full_t/sparse_t,"d-k")
plt.xlabel("cells")
plt.ylabel("full time / sparse time")
plt.savefig("fullVSsparseTime.pdf")
###########################################################################
print("START")
# verify_with_1d()
main()
# timeit()
print("END") | [
"a.mikkonen@iki.fi"
] | a.mikkonen@iki.fi |
7e043518a2f136a4085f8d4d13121670413127d1 | 298f0799a6c7ff0ae2ed4994160b22527c0e2a3d | /cars.py | 4d665eed95666238b833bc8a15916f9c16ec75c3 | [] | no_license | marinacruzjordao/Python_Rent_A_Car | 2d90155eb7eb2ac34d66de4aee4e721aada534e7 | 5b82642d66d46ecac66d8a861876d9420288f43a | refs/heads/master | 2023-03-07T02:47:51.237946 | 2021-02-20T15:14:24 | 2021-02-20T15:14:24 | 338,102,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py |
import sqlite3
import images
from PySimpleGUI import PySimpleGUI as sg
class RentACar:
def __init__(self):
#layout
sg.theme('Reddit')
def wind_initial(self):
self.layout=[
[sg.Button(image_data=images.imge)],
[sg.Button('Cars Catalogue',size=(20,2)),sg.Button('Car Registration ',size=(20,2))],
#[sg.Output(size=(140,10))],
]
return sg.Window('Rent a Car',layout=self.layout)
def start(self):
#Create initial window
#self.window1, self.window2 =r.wind_initial(), None
#self.w1 = sg.Window('Rent a Car',layout=self.layout)
#self.w2 = sg.Window('Car Registration').layout(self.layout2)
#connect cars data_base
#r.cars_data_base_connection()
wind1=r.wind_initial()
while True:
event, values = wind1.read()
#self.event, value=self.w1.read()
#self.letter=value.get('letter')
#When window is closed
if event == sg.WINDOW_CLOSED:
#if self.windows == self.w1 and self.event == sg.WIN_CLOSED:
#r.close_program()
break
#if self.window == self.w1 and self.event == 'Cars Catalogue':
# r.display_cars()
#if self. window == self.w1 and self.event == 'Car Registration':
# r.add_car()
r=RentACar()
r.start() | [
"marinacruzjordao@gmail.com"
] | marinacruzjordao@gmail.com |
6ef3b6ebd24d6353ea1c4341f997729f76f1d7e8 | 2b2cabd2e436ec5350717bf4f2a798c20cf09efb | /Evaluation/textgrid_2_textfile.py | 65f0b3a006da939e3ce5698d590777aa4337cd6f | [] | no_license | miastra/SwedishForcedAligner | 1ac9db7a33a6c639f1249acbe57f48f312d84938 | 6fb14bd480f67d6e674579d7541d427c2a1ea3b9 | refs/heads/master | 2021-04-06T01:08:32.019060 | 2018-04-06T07:34:14 | 2018-04-06T07:34:14 | 125,353,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,626 | py | import os
import re
# Extract labels and start and end times from a TextGrid
def extract_word_alignments(tg_path):
textgrid = open(tg_path)
lines = textgrid.readlines()
intervaltier_idx = []
for i in range(0, len(lines)):
if re.search('IntervalTier', lines[i]):
intervaltier_idx.append(i)
word_ali_lines = []
for j in range(intervaltier_idx[0], intervaltier_idx[1]):
if re.search('intervals \[', lines[j]):
text = re.sub('[\t]*text = ', '', lines[j+3])
if not text in ['"sil"\n', '"sp"\n', '""\n']:
word_ali_lines.append(re.sub('[\t]*xmin = ', '', lines[j+1]))
word_ali_lines.append(re.sub('[\t]*xmax = ', '', lines[j+2]))
word_ali_lines.append(text)
return word_ali_lines
# Create a folder with textfiles based on the TextGrids in the folder textgrid_path
def textfiles_from_tg_folder(textgrid_path, textfile_folder_path, model_name):
# Check if the folder exists, otherwise create it
if not os.path.exists(textfile_folder_path):
os.makedirs(textfile_folder_path)
# Loop over all TextGrids
for tg in os.listdir(textgrid_path):
tg_path = os.path.join(textgrid_path, tg)
textgrid = open(tg_path)
# Extract relevant information from the TextGrid
align_info = extract_word_alignments(tg_path)
# Save it in a textfile
textfile_path = (os.path.join(textfile_folder_path, (tg + '.txt'))).replace('.TextGrid', '')
textfile = open(textfile_path, 'w')
for item in align_info:
textfile.write(item)
# Create textfiles from all TextGrids structured the way MFA outputs them
def textfiles_from_folder_with_folders(textgrid_folder_path, textfile_folder_path, model_name):
for folder in os.listdir(textgrid_folder_path):
textgrid_path = os.path.join(textgrid_folder_path, folder)
textfiles_from_tg_folder(textgrid_path, textfile_folder_path, model_name)
def main():
# Example run
# Path to the folder containing folders with TextGrids
current_path = os.path.abspath(os.path.dirname(__file__))
textgrid_folder_path = os.path.join(current_path, 'Textgrid_2_textfile_example', 'TextGrids')
# Model name (for naming folders and files in a logical way)
model_name = 'Example'
# Path to the folder in which the textfiles should be saved
textfile_folder_path = os.path.join('Textgrid_2_textfile_example', 'Textfiles', model_name)
textfiles_from_folder_with_folders(textgrid_folder_path, textfile_folder_path, model_name)
if __name__ == "__main__":
main()
| [
"miastra@kth.se"
] | miastra@kth.se |
ec66bc0543ba3c33f0203126bbdc78da3258cd28 | 38d8d417c3ba38b11fd33d82acb3899c65342b23 | /Actividad4.py | eb27e34bdf92e80f982573b237c74ff7212c60b7 | [] | no_license | kevinalx/PythonTkinter | 41a11ef025b4ddc2f8753dc053c20bad87d2fb58 | a9fbdf4d2b892e475d817ecd6b6eabece5de2bea | refs/heads/master | 2022-12-04T14:22:40.942465 | 2020-08-26T03:22:08 | 2020-08-26T03:22:08 | 290,383,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | #Un alumno desea saber cual será su calificación final en la materia de Algoritmos. Dicha
#calificación se compone de los siguientes porcentajes:
#55% del promedio de sus tres calificaciones parciales.
#30% de la calificación del examen final.
#15% de la calificación de un trabajo final.
#Leer c1, c2, c3, ef, tf
#prom = (c1 + c2 + c3)/3
#ppar = prom * 0.55
#pef = ef * 0.30
#ptf = tf * 0.15
#cf = ppar + pef + ptf
#Imprimir cf
from tkinter import *
def calcular():
prom = (calf1 + calf2 + calf3)/3
ppar = prom * 0.55
pef = examen * 0.30
ptf = tf * 0.15
cf = ppar + pef + ptf
print(f"\nSu nota definitiva es de: {cf}")
top = Tk()
calf1 = float
calf2 = float
calf3 = float
examen = float
tf = int
top.geometry("400x400")
top.title("Sistema de calificaciones")
etiqueta1 = Label(top, text="Ingrese la calificacion 1 ").place(x=10, y=10)
C1 = Entry(top, textvariable=calf1).place(x=200, y=10)
etiqueta2 = Label(top, text="Ingrese la calificacion 2 " ).place(x=10, y=40)
C2 = Entry(top, textvariable=calf2).place(x=200, y=40)
etiqueta3 = Label(top, text="Ingrese su calificacion 3 ").place(x=10, y=70)
C3 = Entry(top, textvariable=calf3).place(x=200, y=70)
etiquetaExamen = Label(top, text="Ingrese su nota del examen final ").place(x=10, y=100)
examenFinal = Entry(top, textvariable=examen).place(x=200, y=100)
etiquetaFinal = Label(top, text="Ingrese nota de su trabajo final ").place(x=10, y=130)
trabajoFinal = Entry(top, textvariable=tf).place(x=200, y=130)
boton = Button(top, text="Calcular", command=calcular).place(x=10, y=160)
top.mainloop() | [
"kevintorres1208@gmail.com"
] | kevintorres1208@gmail.com |
6ee99c0fb1e4c9f4f6261a6f986415fe77cd8534 | 113641697bd7db550eed62a6f2194e12dd2125af | /Tests/AddBookTests.py | 371ed95c50f9f5fc74e211e1a07fe988e371e412 | [] | no_license | mohabayman/BookStore-PythonAutomation | 79debd340cc733dbdefc129f692ff54e30a0da2e | 0093d380541833f367db1fc876cc5039b7f1c348 | refs/heads/master | 2020-12-02T06:45:50.603637 | 2018-12-18T17:25:56 | 2018-12-18T17:25:56 | 96,895,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | from PageObjects.HomePage import HomePage
from PageObjects.LoginPage import LoginPage
from PageObjects.AdminPage import AdminPage
from PageObjects.BooksPage import BooksPage
from PageObjects.AddBookPage import AddBookPage
from Utils.DriverFactory import DriverFactory
from time import sleep
class AddBookTests(object):
@staticmethod
def add_book():
driver = DriverFactory.get_driver('Edge')
driver.get('http://10.1.23.10/bookstore')
sleep(1)
home_page = HomePage(driver)
home_page.click_on_admin_link()
sleep(1)
login_page = LoginPage(driver)
login_page.login('admin', 'admin')
sleep(1)
admin_page = AdminPage(driver)
admin_page.click_on_books_link()
sleep(1)
books_page = BooksPage(driver)
books_page.click_on_add_new()
sleep(1)
add_book = AddBookPage(driver)
add_book.add_book('ahmed', 'ali', 'Sci-Fi', '1000')
sleep(3)
driver.close()
| [
"seltahan@integrant.com"
] | seltahan@integrant.com |
f58e818470df252160aaaaa5eb5e4678b4f65192 | e8b3283b51da072bb3721f27250276e8ca09cb16 | /johnsonkuangwebsite/website/models.py | bfc721c9a7b1bd0bbb2dbc3a3ed5e447cf4a6162 | [] | no_license | johnsonkuang/johnson-kuang-website | 9f545cd9877e78e44325f27d8113f4b1724a010c | 804055acf5e0444f1d34b36664ad3239a5209b3b | refs/heads/master | 2020-03-27T09:46:35.779249 | 2019-01-11T07:25:07 | 2019-01-11T07:25:07 | 146,371,291 | 0 | 0 | null | 2018-11-05T21:56:24 | 2018-08-28T00:50:08 | CSS | UTF-8 | Python | false | false | 7,738 | py | from django.db import models
from django.dispatch import receiver
from django.db.models.signals import pre_delete, post_delete, pre_save, post_save, m2m_changed
from django.utils import timezone
from django.contrib.auth.models import User
from datetime import date
from image_cropping import ImageRatioField
from sortedm2m.fields import SortedManyToManyField
from website.utils.fileutils import UniquePathAndRename
class Project(models.Model):
name = models.CharField(max_length=255)
#short name for urls
short_name = models.CharField(max_length=255)
short_name.help_text = "This should be the same as the name but lower case with no spaces or special characters"
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
gallery_image = models.ImageField(upload_to='projects/images', blank=True, null=True, max_length=255)
cropping = ImageRatioField('gallery_image', '500x400', size_warning=True)
about = models.TextField(null=True, blank=True)
def save(self, *args, **kwargs):
self.short_name = ''.join(e for e in self.name.lower() if e.isalnum())
super(Project, self).save(*args, **kwargs)
class Banner(models.Model):
# Separation of videos by page they are to be loaded with for future filtering
INDEX = "Index"
ABOUT = "About"
RESUME = "Resume"
VACATION = "Vacation"
PROJECTS = "Projects"
IND_PROJECT = "Individual Project"
SANDBOX = "Sandbox"
PAGE_CHOICES = (
(INDEX, INDEX),
(ABOUT, ABOUT),
(RESUME, RESUME),
(VACATION, VACATION),
(PROJECTS, PROJECTS),
(IND_PROJECT, IND_PROJECT),
(SANDBOX, SANDBOX),
)
page = models.CharField(max_length=50, choices=PAGE_CHOICES, default=INDEX)
image = models.ImageField(blank=True, upload_to=UniquePathAndRename('banner', True), max_length=255)
project = models.ForeignKey(Project, blank=True, null=True, on_delete=models.CASCADE)
project.help_text = "If this banner is attached to a specific project, set page to IND_Project"
cropping = ImageRatioField('image', '2000x500', free_crop=True)
image.help_text = 'You must select "Save and continue editing" at the bottom of the page after uploading a new image for cropping. Please note that since we are using a responsive design with fixed height banners, your selected image may appear differently on various screens.'
title = models.CharField(max_length=50, blank=True, null=True)
caption = models.CharField(max_length=1024, blank=True, null=True)
alt_text = models.CharField(max_length=1024, blank=True, null=True)
link = models.CharField(max_length=1024, blank=True, null=True)
favorite = models.BooleanField(default=False)
favorite.help_text = 'Check this box if this image should appear before other (non-favorite) banner images on the same page.'
date_added = models.DateField(auto_now=True)
def admin_thumbnail(self):
if self.image:
return u'<img src="%s" height="100"/>' % (self.image.url)
else:
return "No image found"
admin_thumbnail.short_description = 'Thumbnail'
admin_thumbnail.allow_tags = True
def __str__(self):
if self.title and self.page:
return self.title + ' (' + self.get_page_display() + ')'
else:
return "Banner object for " + self.get_page_display()
'''
TODO: Get this to work
@receiver(pre_delete, signal=Banner)
def banner_delete(sender, instance, **kwargs):
if instance.image:
instance.image.delete(True)
'''
class About_Gallery(models.Model):
name = models.CharField(max_length=255)
image = models.ImageField(upload_to='about/gallery/', max_length=255)
cropping = ImageRatioField('image', '400x300', free_crop=True)
class Image(models.Model):
name = models.CharField(max_length=255, blank=True)
image = models.ImageField(upload_to='images/', max_length=255)
cropping = ImageRatioField('image', '2000x500', free_crop=True)
caption = models.CharField(max_length=255, blank=True, null=True)
alt_text = models.CharField(max_length=255, blank=True, null=True)
project = models.ForeignKey(Project, blank=True, null=True, on_delete=models.SET_NULL)
image.help_text = 'You must select "Save and continue editing" at the bottom of the page after uploading a new image for cropping. Please note that since we are using a responsive design with fixed height banners, your selected image may appear differently on various screens.'
# Copied from person model
# LS: Added image cropping to fixed ratio
# See https://github.com/jonasundderwolf/django-image-cropping
# size is "width x height"
def __str__(self):
return self.name
class Video(models.Model):
name = models.CharField(max_length=500)
video = models.FileField(verbose_name='static_videos', upload_to='videos/')
date = models.DateField(null=True)
project = models.ForeignKey(Project, blank=True, null=True, on_delete=models.SET_NULL)
#Separation of videos by page they are to be loaded with for future filtering
INDEX = "Index"
ABOUT = "About"
RESUME = "Resume"
VACATION = "Vacation"
PROJECTS = "Projects"
SANDBOX = "Sandbox"
PAGE_CHOICES = (
(INDEX, INDEX),
(ABOUT, ABOUT),
(RESUME, RESUME),
(VACATION, VACATION),
(PROJECTS, PROJECTS),
(SANDBOX, SANDBOX),
)
page = models.CharField(max_length=50, choices = PAGE_CHOICES, null=True)
def __str__(self):
return self.name + ": " + str(self.video)
class ResumeEntryEducation(models.Model):
school = models.CharField(max_length=255)
degree = models.CharField(max_length=255)
degree_specific = models.CharField(max_length=255, blank=True)
start_date = models.DateField()
end_date = models.DateField()
description = models.TextField()
def is_Present(self):
return self.end_date > date.today()
class ResumeEntryBasicInfo(models.Model):
#Meant to store all basic info in one entry
age = models.IntegerField()
email = models.EmailField()
phone = models.BigIntegerField()
address = models.CharField(max_length=255)
language = models.CharField(max_length=255)
about_description = models.TextField()
def __str__(self):
return 'Basic Info'
class ResumeSkill(models.Model):
name = models.CharField(max_length=255)
percent = models.IntegerField()
percent.help_text = 'You must choose an int between 0 and 100 representing precentage'
class ResumeWorkExperience(models.Model):
name = models.CharField(max_length=255)
start_date = models.DateField()
end_date = models.DateField()
position = models.CharField(max_length=255)
description = models.TextField()
def get_Start_Month(self):
return self.start_date.strftime("%B")
def get_End_Month(self):
return self.end_date.strftime("%B")
def is_Present(self):
return self.end_date > date.today()
class NewsletterUser(models.Model):
email = models.EmailField()
date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.email
class Newsletter(models.Model):
EMAIL_STATUS_CHOICES = (
('Draft', 'Draft'),
('Published', 'Published')
)
subject = models.CharField(max_length=255)
body = models.TextField()
email = models.ManyToManyField(NewsletterUser)
status = models.CharField(max_length=10, choices=EMAIL_STATUS_CHOICES)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.subject
| [
"johnsonkuang@outlook.com"
] | johnsonkuang@outlook.com |
bd7ff72f18a4bbea1188117dfed8fe4b78f612df | 28a9db1536877930b7376cb66d18fafa2d01a1fe | /driftproject/routes.py | f2a77620e1181d6ab613806758a0e81eb6155150 | [] | no_license | reinaldo1526/drftprojectweek4hw | c3b409f6c4b8fb26e6620cbffce823ebd7f07d5b | 76b854b4ef59971dce251c4f148ac69552ff7f6c | refs/heads/master | 2020-12-10T21:15:21.404322 | 2020-01-13T23:10:52 | 2020-01-13T23:10:52 | 233,712,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,305 | py | from driftproject import app, db
from flask import render_template, request, redirect, url_for
from driftproject.forms import SignupForm,LoginForm,PostForm,CheckForm
from werkzeug.security import check_password_hash
from flask_login import login_user, current_user,login_required
from driftproject.models import User,Post
# Home Route
@app.route("/")
def home():
posts = Post.query.all()
return render_template("home.html", post = posts)
# Sign Up Route
@app.route("/signup", methods=["GET", "POST"])
def signup():
signupForm = SignupForm()
if request.method == "POST":
username = signupForm.username.data
driftcar = signupForm.driftcar.data
phonenumber = signupForm.phonenumber.data
password = signupForm.password.data
print(username, driftcar, phonenumber,password)
user = User(username,driftcar,password)
db.session.add(user)
db.session.commit()
return render_template("signup.html", signupform = signupForm)
#login route
@app.route("/login", methods=["GET", "POST"])
def login():
loginForm = LoginForm()
if request.method == "POST":
user_username = loginForm.username.data
password = loginForm.password.data
# find out who the logged in user currently is
logged_user = User.query.filter(User.username == user_username).first()
if logged_user and check_password_hash(logged_user.password,password):
login_user(logged_user)
print(current_user.username)
return redirect(url_for('home'))
else:
print("Not Valid Method")
return render_template("login.html", loginform = loginForm)
@app.route("/post", methods = ["GET", "POST"])
@login_required
def post():
postForm = PostForm()
title = postForm.title.data
content = postForm.content.data
user_id = current_user.id
print(title,content,user_id)
# Saving Post Data to Database
post = Post(title = title, content = content, user_id = user_id)
db.session.add(post)
db.session.commit()
return render_template('post.html', postform = postForm)
@app.route("/post", methods = ["GET", "POST"])
@login_required
def check():
checkForm = CheckForm()
return render_template('check.html', checkform = checkForm)
| [
"Juny@reinaldos-MacBook-Air.local"
] | Juny@reinaldos-MacBook-Air.local |
94fe269b557ee98918bd55d5831a8de7b0bca7e3 | 9f0c4fdcebf2bdf83765841b565414c0d0db5038 | /8.3 - Emparelhamento Bipartido (Ponderado)/EmparelhamentoBipartidoPonderado.py | 77f6246e9d5d275268168a03be3f8b47227adfce | [] | no_license | gabrielgpavan2/Algoritmo-de-Grafos | b528cc1ba826db99c4ad5b4326ecd728f7ad2629 | 162a42dccb1dc6355441e005705f56e55746269b | refs/heads/master | 2020-04-30T02:26:36.639257 | 2019-03-19T16:57:57 | 2019-03-19T16:57:57 | 176,559,680 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,353 | py | # -*- coding: utf-8 -*-
# Autoria:
# Fabiano S. Oliveira
# Paulo E. D. Pinto
#
# A correção e a análise de complexidade deste programa
# encontram-se no livro
#
# Teoria Computacional de Grafos, Elsevier, 2018
# Jayme L. Szwarcfiter
#Algoritmo 8.3: Emparelhamento Bipartido (Ponderado)
from Grafo import GrafoListaAdj
from EmparelhamentoBipartido import EmparelhamentoBipartido, ObterD, BuscaCamAumentante
def EmparelhamentoBipartidoPonderado(G, w):
#Dados: grado bipartido completo regular ponderado G, peso real w[v1][v2] ≥ 0 para cada aresta (v1,v2) ∈ E
c = [None]*(G.n+1)
e = [None]*(G.n+1)
for i in range(1,G.n+1):
e[i] = [None]*(G.n+1)
G.tV1 = G.n//2
def DefinirG0(G,e):
G0 = GrafoListaAdj(orientado = False)
G0.DefinirN(G.n)
G0.tV1 = G0.n//2
G0.d = [0]*(G0.n+1)
E0 = [(v1,v2) for (v1,v2) in G.E() if e[v1][v2] == 0.0]
for (v1,v2) in E0:
G0.AdicionarAresta(v1,v2)
G0.d[v1] =+ 1; G0.d[v2] =+ 1
return G0
for v in range(1,G.tV1+1):
c[v] = max ([w[v][v2] for v2 in G.N(v)])
for v in range(G.tV1+1,G.n+1):
c[v] = 0
for (v1,v2) in G.E():
e[v1][v2] = c[v1] + c[v2] - w[v1][v2]; e[v2][v1] = e[v1][v2]
G0 = DefinirG0(G,e)
while True:
M = EmparelhamentoBipartido(G0)
if len([i for i in range(len(M)) if M[i] != None]) < G.n:
D = ObterD(G0,M)
BuscaCamAumentante(D) #D.Marcado[v] <==> v in Vlin
eps = [e[v1][v2] for v2 in range(G.tV1+1,G.n+1) for v1 in range(1,G.tV1+1) if (not D.Marcado[v2]) and D.Marcado[v1]]
emin = min(eps)
for v in range(G.tV1+1,G.n+1):
if D.Marcado[v] > 0:
c[v] = c[v] + emin
for v in range(1,G.tV1+1):
if D.Marcado[v] > 0:
c[v] = c[v] - emin
for (v1,v2) in G.E():
e[v1][v2] = c[v1] + c[v2] - w[v1][v2]; e[v2][v1] = e[v1][v2]
G0 = DefinirG0(G,e)
else:
break
return (M, c)
G = GrafoListaAdj(orientado = False)
E = [(1,4,8),(1,5,7),(1,6,5),(2,4,4),(2,5,7),(2,6,2),(3,4,6),(3,5,3),(3,6,1)]
G.DefinirN(6)
for (u,v,c) in E:
e = G.AdicionarAresta(u,v); e.c = c
w = [None]*(G.n+1)
for i in range(1,G.n+1):
w[i] = [None]*(G.n+1)
for (u,e_no) in G.E(IterarSobreNo=True):
e = e_no.e
w[e.v1][e.v2], w[e.v2][e.v1] = e.c, e.c
(M,c) = EmparelhamentoBipartidoPonderado(G, w)
print ("c = {0}; M = {1}".format(c, M))
print()
| [
"noreply@github.com"
] | noreply@github.com |
0a016c76968b4e068f1c591451a39f1691413058 | 1ffe0a2e874eb48c8f6d41d353fc43ec9652372e | /SScriptCompiler/src/conf/SMpu9250.py | 6e43b4c00ad171995d823e5c9423452ae3541f5d | [
"MIT"
] | permissive | alklasil/SScript | 77c03acef77fba0894f085becbe2f5670e3e8b50 | de4481bf96e79b9ee157e266ea9fe8b1bfb3701e | refs/heads/master | 2021-03-27T11:30:21.073203 | 2018-05-25T16:40:56 | 2018-05-25T16:40:56 | 122,749,015 | 0 | 0 | MIT | 2018-03-28T19:44:23 | 2018-02-24T14:47:26 | Python | UTF-8 | Python | false | false | 2,452 | py | """Helper module for accessing functions."""
from src.SFunction import SFunction
from src.SVariable import SVariable
class SMpu9250:
"""Standard functions class for SScript."""
def __init__(self, useVariables=True):
self.useVariables = useVariables
def getFunctions(self):
""""Return a list of functions by mpu9250."""
return [
SFunction("mpu_readSensor"),
SFunction("mpu_getAccelX_mss"),
SFunction("mpu_getAccelY_mss"),
SFunction("mpu_getAccelZ_mss"),
SFunction("mpu_getGyroX_rads"),
SFunction("mpu_getGyroY_rads"),
SFunction("mpu_getGyroZ_rads"),
SFunction("mpu_getMagX_uT"),
SFunction("mpu_getMagY_uT"),
SFunction("mpu_getMagZ_uT"),
SFunction("mpu_getTemperature_C")
]
def getVariables(self, sdict):
"""Return list of variables by mpu9250."""
if not self.useVariables:
return []
return [
# basic variables
# If you do not use stdVariables, the first variable is not allowed
# to be a list
# sensor variables
# accelerometer
SVariable("Accel_mss"), # amplitude of [x,y,z]
SVariable("AccelX_mss"), # amplitude in x-direction
SVariable("AccelY_mss"), # amplitude in y-direction
SVariable("AccelZ_mss"), # amplitude in z-direction
# gyroscope
SVariable("Gyro_rads"),
SVariable("GyroX_rads"),
SVariable("GyroY_rads"),
SVariable("GyroZ_rads"),
# magnetometer
SVariable("Mag_uT"),
SVariable("MagX_uT"),
SVariable("MagY_uT"),
SVariable("MagZ_uT"),
# temperature
SVariable("Temperature_C"),
]
def firstVariable(self):
"""Return the first variable. Can be used in indexing lists."""
return "Accel_mss"
def lastVariable(self):
"""Return the last variable. Can be used in indexing lists."""
return "Temperature_C"
def getCpp(self, identifier):
"""Return c++ code related to this conf."""
if identifier == "include":
return ["#include <SMpu9250.h>"]
elif identifier == "functions_all":
return ["SMPU_FUNCTIONS_ALL"]
return []
| [
"alklasil@student.oulu.fi"
] | alklasil@student.oulu.fi |
9fa1ef1faa86b56dd8b2d4ab8a53d48258f7bed6 | b45fc189b5dc879d179a7f90c450fb08f661e686 | /convertNBG.py | 2168cd7804a5b4815d72ee7f74ff7cbac1e14984 | [] | no_license | eigeneko/ATF | 30096572ac7cbb442acbc5717085fec7611f53e5 | 8e97f5f250dac50f01ec81b551f3a9d391406a9d | refs/heads/master | 2020-04-15T04:43:35.692435 | 2019-01-07T07:40:31 | 2019-01-07T07:40:31 | 158,235,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | import os
import subprocess
import argparse
from utils import autoNBG
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='FreeEnCal Outfile Compare')
parser.add_argument(dest='task', metavar='task', nargs=1, help='Input the name of logic fragments you want to compare')
args = parser.parse_args()
function = 'predicateLogicConvert.py'
logicFrag = args.task[0]
prefix = 'format_'
autoNBG(scripts=function, logicalFrag=logicFrag, inputPrefix=prefix) | [
"observelily@gmail.com"
] | observelily@gmail.com |
7a821db6e73317f1eda8b4668d934a936b9bc173 | efb3d0c2f9fcc5be631323e31f4b8dfcdd0ab676 | /compiler/tests/14_replica_column_test.py | c8d50a539879db74ee9e9e7d09880960e2cc6270 | [
"BSD-3-Clause"
] | permissive | kanokkorn/OpenRAM | 5f30beb35e3c161fbf0d233b59fe7d7805d3c348 | 3a9693e37fd3afbd52001839966b0f2811fb4ccd | refs/heads/master | 2022-06-03T12:53:47.750245 | 2022-05-27T15:53:05 | 2022-05-27T15:53:05 | 189,780,330 | 0 | 0 | BSD-3-Clause | 2021-04-07T06:49:08 | 2019-06-01T21:47:50 | Python | UTF-8 | Python | false | false | 1,291 | py | #!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California
# All rights reserved.
#
import unittest
from testutils import *
import sys, os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
from globals import OPTS
from sram_factory import factory
import debug
class replica_column_test(openram_test):
def runTest(self):
config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
globals.init_openram(config_file)
if OPTS.tech_name == "sky130":
num_spare_rows = 1
num_spare_cols = 1
else:
num_spare_rows = 0
num_spare_cols = 0
debug.info(2, "Testing replica column for single port")
a = factory.create(module_type="replica_column",
rows=4 + num_spare_rows,
rbl=[1, 0],
replica_bit=1,
column_offset=num_spare_cols)
self.local_check(a)
globals.end_openram()
# run the test from the command line
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main(testRunner=debugTestRunner())
| [
"mrg@ucsc.edu"
] | mrg@ucsc.edu |
52f9e019ca59ad581223697ba63f672d9198b805 | 87390bcd42b1b56a3c6235f5a4a304386b8be963 | /src/evaluation.py | 87776a238d1308b23102bb669ec461c1c7896584 | [] | no_license | emalgorithm/rna-design | d195a79c829e2e9d9beaeea5e177704ad53ab380 | aec77a18abe4850958d6736ec185a6f8cbfdf20c | refs/heads/master | 2020-04-22T10:24:14.593635 | 2019-05-11T22:11:23 | 2019-05-11T22:11:23 | 170,304,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,239 | py | from sklearn.metrics import hamming_loss
import numpy as np
import torch
import RNA
from src.data_util.data_processing import one_hot_embed_sequence, prepare_sequences, decode_sequence
from src.data_util.data_constants import word_to_ix, tag_to_ix, ix_to_word, ix_to_tag
from sklearn.metrics import accuracy_score, f1_score, precision_score
def masked_hamming_loss(target, pred, ignore_idx=0):
mask = target != ignore_idx
return hamming_loss(target[mask], pred[mask])
def compute_accuracy(target, pred, ignore_idx=0):
accuracy = 0
for i in range(len(target)):
mask = target[i] != ignore_idx
accuracy += 1 if np.array_equal(target[i][mask], pred[i][mask]) else 0
return accuracy / len(pred)
def compute_metrics_graph(target_dot_brackets, input_sequences, pred_sequences_scores, batch,
verbose=False):
pred_sequence = pred_sequences_scores.max(1)[1]
target_dot_brackets_np = []
pred_sequences_np = []
input_sequences_np = []
for j, i in enumerate(batch):
if len(target_dot_brackets_np) <= i:
pred_sequences_np.append([])
target_dot_brackets_np.append([])
input_sequences_np.append([])
pred_sequences_np[i].append(pred_sequence[j].item())
target_dot_brackets_np[i].append(target_dot_brackets[j].item())
input_sequences_np[i].append(input_sequences[j].item())
dot_brackets_strings = [decode_sequence(dot_bracket, ix_to_tag) for i, dot_bracket in
enumerate(target_dot_brackets_np)]
sequences_strings = [decode_sequence(sequence, ix_to_word) for i, sequence in enumerate(
input_sequences_np)]
pred_sequences_strings = [decode_sequence(pred, ix_to_word).replace('<PAD>', 'A') for i, pred in enumerate(
pred_sequences_np)]
pred_dot_brackets_strings = [RNA.fold(pred_sequences_strings[i])[0] for
i, pred_sequence in enumerate(pred_sequences_strings)]
h_loss = np.mean([hamming_loss(list(dot_brackets_strings[i]),
list(pred_dot_brackets_strings[i])) for i in range(len(
pred_dot_brackets_strings))])
accuracy = np.mean([1 if (dot_brackets_strings[i] == pred_dot_brackets_strings[i]) else 0 for i
in range(len(pred_dot_brackets_strings))])
if verbose:
for i in range(len(dot_brackets_strings)):
print("REAL SEQUENCE: {}".format(sequences_strings[i]))
print("PRED SEQUENCE: {}".format(pred_sequences_strings[i]))
print("REAL: {}".format(dot_brackets_strings[i]))
print("PRED: {}".format(pred_dot_brackets_strings[i]))
print()
return h_loss, accuracy
# return 0, 0
def compute_metrics(target_dot_brackets, input_sequences, pred_sequences_scores, sequences_lengths,
verbose=False):
dot_brackets_strings = [decode_sequence(dot_bracket.cpu().numpy()[:sequences_lengths[
i]], ix_to_tag) for i, dot_bracket in enumerate(target_dot_brackets)]
sequences_strings = [decode_sequence(sequence.cpu().numpy()[:sequences_lengths[
i]], ix_to_word) for i, sequence in enumerate(input_sequences)]
pred_sequences_np = pred_sequences_scores.max(2)[1].cpu().numpy()
pred_sequences_strings = [decode_sequence(pred[:sequences_lengths[i]], ix_to_word) for i,
pred in enumerate(pred_sequences_np)]
pred_dot_brackets_strings = [RNA.fold(pred_sequences_strings[i])[0] for
i, pred_sequence in enumerate(pred_sequences_strings)]
h_loss = np.mean([hamming_loss(list(dot_brackets_strings[i]),
list(pred_dot_brackets_strings[i])) for i in range(len(
pred_dot_brackets_strings))])
accuracy = np.mean([1 if (dot_brackets_strings[i] == pred_dot_brackets_strings[i]) else 0 for i
in range(len(pred_dot_brackets_strings))])
if verbose:
for i in range(len(dot_brackets_strings)):
print("REAL SEQUENCE: {}".format(sequences_strings[i]))
print("PRED SEQUENCE: {}".format(pred_sequences_strings[i]))
print("REAL: {}".format(dot_brackets_strings[i]))
print("PRED: {}".format(pred_dot_brackets_strings[i]))
print()
return h_loss, accuracy
# return 0, 0
def evaluate(model, test_loader, loss_function, batch_size, mode='test', device='cpu'):
model.eval()
with torch.no_grad():
losses = []
h_losses = []
accuracies = []
for batch_idx, (sequences, dot_brackets, sequences_lengths) in enumerate(test_loader):
sequences = sequences.to(device)
dot_brackets = dot_brackets.to(device)
sequences_lengths = sequences_lengths.to(device)
# Skip last batch if it does not have full size
if sequences.shape[0] < batch_size:
continue
base_scores = model(sequences, sequences_lengths)
losses.append(loss_function(base_scores.view(-1, base_scores.shape[2]),
dot_brackets.view(-1)))
avg_h_loss, avg_accuracy = compute_metrics(base_scores, dot_brackets)
h_losses.append(avg_h_loss)
accuracies.append(avg_accuracy)
avg_loss = np.mean(losses)
avg_h_loss = np.mean(h_losses)
avg_accuracy = np.mean(accuracies)
print("{} loss: {}".format(mode, avg_loss))
print("{} hamming loss: {}".format(mode, avg_h_loss))
print("{} accuracy: {}".format(mode, avg_accuracy))
return avg_loss, avg_h_loss, avg_accuracy
def evaluate_struct_to_seq(model, test_loader, loss_function, batch_size, mode='test',
device='cpu', verbose=False):
model.eval()
with torch.no_grad():
losses = []
h_losses = []
accuracies = []
for batch_idx, (dot_brackets, sequences, sequences_lengths) in enumerate(test_loader):
dot_brackets = dot_brackets.to(device)
sequences = sequences.to(device)
sequences_lengths = sequences_lengths.to(device)
# Skip last batch if it does not have full size
if dot_brackets.shape[0] < batch_size:
continue
base_scores = model(dot_brackets, sequences_lengths)
losses.append(loss_function(base_scores.view(-1, base_scores.shape[2]),
dot_brackets.view(-1)).item())
avg_h_loss, avg_accuracy = compute_metrics(target_dot_brackets=dot_brackets,
input_sequences=sequences,
pred_sequences_scores=base_scores,
sequences_lengths=sequences_lengths,
verbose=verbose)
h_losses.append(avg_h_loss)
accuracies.append(avg_accuracy)
avg_loss = np.mean(losses)
avg_h_loss = np.mean(h_losses)
avg_accuracy = np.mean(accuracies)
print("{} loss: {}".format(mode, avg_loss))
print("{} hamming loss: {}".format(mode, avg_h_loss))
print("{} accuracy: {}".format(mode, avg_accuracy))
return avg_loss, avg_h_loss, avg_accuracy
def evaluate_struct_to_seq_graph(model, test_loader, loss_function=None, batch_size=None,
mode='test', device='cpu', verbose=False, gan=False,
n_random_features=0):
model.eval()
with torch.no_grad():
losses = []
h_losses = []
accuracies = []
for batch_idx, data in enumerate(test_loader):
data.x = data.x.to(device)
data.edge_index = data.edge_index.to(device)
data.edge_attr = data.edge_attr.to(device)
data.batch = data.batch.to(device)
dot_bracket = data.y.to(device)
sequence = data.sequence.to(device)
if gan:
z = torch.Tensor(np.random.normal(0, 1, (data.x.shape[0], n_random_features))).to(
device)
data.x = torch.cat((data.x, z), dim=1)
pred_sequences_scores = model(data)
if loss_function:
losses.append(loss_function(pred_sequences_scores, sequence).item())
# Metrics are computed with respect to generated folding
avg_h_loss, avg_accuracy = compute_metrics_graph(target_dot_brackets=dot_bracket,
input_sequences=sequence,
pred_sequences_scores=pred_sequences_scores,
batch=data.batch,
verbose=verbose)
h_losses.append(avg_h_loss)
accuracies.append(avg_accuracy)
avg_loss = 0 if not losses else np.mean(losses)
avg_h_loss = np.mean(h_losses)
avg_accuracy = np.mean(accuracies)
print("{} loss: {}".format(mode, avg_loss))
print("{} hamming loss: {}".format(mode, avg_h_loss))
print("{} accuracy: {}".format(mode, avg_accuracy))
return avg_loss, avg_h_loss, avg_accuracy
def evaluate_family_classifier(model, test_loader, loss_function=None, batch_size=None,
mode='test', device='cpu', verbose=False):
model.eval()
with torch.no_grad():
losses = []
accuracies = []
for batch_idx, data in enumerate(test_loader):
data.x = data.x.to(device)
data.edge_index = data.edge_index.to(device)
data.edge_attr = data.edge_attr.to(device)
data.batch = data.batch.to(device)
data.y = data.y.to(device)
out = model(data)
# Loss is computed with respect to the target sequence
loss = loss_function(out, data.y)
losses.append(loss.item())
pred = out.max(1)[1]
accuracy = compute_metrics_family(data.y, pred)
accuracies.append(accuracy)
avg_loss = np.mean(losses)
avg_accuracy = np.mean(accuracies)
print("{} loss: {}".format(mode, avg_loss))
print("{} accuracy: {}".format(mode, avg_accuracy))
return avg_loss, avg_accuracy
def compute_metrics_family(target, pred):
    """Return classification accuracy: the fraction of pred entries equal to target."""
    n_correct = target.eq(pred.long()).sum().item()
    return n_correct / target.shape[0]
def get_sensitivity(cf):
    """Macro-averaged sensitivity, tp / (tp + fn), over a square confusion matrix."""
    n_classes = len(cf)
    total = 0
    for label in range(n_classes):
        tp = cf[label, label]
        fn = np.sum(cf[:, label]) - tp
        total += tp / (tp + fn)
    return total / n_classes
def get_specificity(cf):
    """Macro-averaged specificity, tn / (tn + fp), over a square confusion matrix."""
    n_classes = len(cf)
    total = 0
    for label in range(n_classes):
        tp = cf[label, label]
        fp = np.sum(cf[label, :]) - tp
        fn = np.sum(cf[:, label]) - tp
        tn = np.sum(cf) - tp - fp - fn
        total += tn / (tn + fp)
    return total / n_classes
| [
"emanuele.rossi1909@gmail.com"
] | emanuele.rossi1909@gmail.com |
c9ff13c653b1b1f790075ec691191d371ddc12d8 | bdd6168675bbb09d483119afd6accceb13666b14 | /modimport/browser.py | 2550cbde5bdff1ea36cb8675519afdc96012743c | [] | no_license | AnDa-creator/Python-games-and-projects | e9c14730eaeef54d713f35cfbdcb967ed9fab263 | 7ff9dd58c6602defa638d4e28976b7fbeb0a2b2c | refs/heads/master | 2022-12-04T12:12:42.424375 | 2020-08-13T15:25:09 | 2020-08-13T15:25:09 | 287,167,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | import webbrowser
webbrowser.open("http") | [
"anuranan.fifa14@gmail.com"
] | anuranan.fifa14@gmail.com |
7b205e91d3d2e6bea20b6b48b78dc7bf2b245db8 | c908dacdc0006e247aa529dddb98bc1d67fbf7c8 | /user.py | c2f9669f15bbddd02c3b88046a27e25547ba194d | [] | no_license | TomeCirun/flask_blog | 40e3bd041fd7ba376c181073c92e19f296aca928 | de34ac14e2e3e2044e3f327e288eefadf34b7faf | refs/heads/main | 2023-03-05T13:51:37.335673 | 2021-02-17T12:04:00 | 2021-02-17T12:04:00 | 339,709,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py |
class User():
def __init__(self,id,username,password):
self.id = id
self.username = username
self.password = password
def __str__(self):
return f'User Id: {self.id}' | [
"cirun@live.com"
] | cirun@live.com |
44f99ab506d13bdff1cbf6562817d69a237ad312 | 55fced1ccebe4e517b75b2efffce21ed908b9fbc | /getVeryCode.py | a3652fe4320d4a282ef7631cb18787504398aa48 | [] | no_license | Jashin-Hitagi/cpquery-spider | 353373f74cbdce436045c9a5ac7ea31b29091958 | fda0230a4bc577f307b06b441aa7cc8de2edb48d | refs/heads/master | 2023-07-06T23:36:03.265782 | 2021-08-12T02:40:04 | 2021-08-12T02:40:04 | 395,161,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,384 | py | import json
from aip import AipOcr
import re
def binarizing(img): # input: gray image
    """Binarize a grayscale image in place: pixels below 200 become 0, the rest 255."""
    cutoff = 200
    pixels = img.load()
    width, height = img.size
    for row in range(height):
        for col in range(width):
            pixels[col, row] = 0 if pixels[col, row] < cutoff else 255
    return img
def del_other_dots(img):
    """Remove isolated black (0) noise pixels from a binarized 0/255 image in place.

    Three passes, in this exact order: (1) clear black border pixels whose
    inward neighbour is white, (2) clear interior black pixels that have more
    than 4 white cells among their 8 neighbours, (3) repeat the border
    clean-up, since pass 2 can expose new border orphans.  Returns the same
    image object.
    """
    pixdata = img.load()
    w, h = img.size
    for i in range(h): # leftmost and rightmost columns
        # print(pixdata[0, i]) # pixel values of the leftmost column
        # print(pixdata[w-1, i]) # pixel values of the rightmost column
        if pixdata[0, i] == 0 and pixdata[1, i] == 255:
            pixdata[0, i] = 255
        if pixdata[w - 1, i] == 0 and pixdata[w - 2, i] == 255:
            pixdata[w - 1, i] = 255
    for i in range(w): # top and bottom rows
        # print(pixdata[i, 0]) # pixel values of the top row
        # print(pixdata[i, h-1]) # pixel values of the bottom row
        if pixdata[i, 0] == 0 and pixdata[i, 1] == 255:
            pixdata[i, 0] = 255
        if pixdata[i, h - 1] == 0 and pixdata[i, h - 2] == 255:
            pixdata[i, h - 1] = 255
    for y in range(1, h - 1):
        for x in range(1, w - 1):
            if pixdata[x, y] == 0: # visit each black pixel off the four borders
                count = 0 # white cells among the 8 surrounding neighbours (max 8)
                if pixdata[x + 1, y + 1] == 255:
                    count = count + 1
                if pixdata[x + 1, y] == 255:
                    count = count + 1
                if pixdata[x + 1, y - 1] == 255:
                    count = count + 1
                if pixdata[x, y + 1] == 255:
                    count = count + 1
                if pixdata[x, y - 1] == 255:
                    count = count + 1
                if pixdata[x - 1, y + 1] == 255:
                    count = count + 1
                if pixdata[x - 1, y] == 255:
                    count = count + 1
                if pixdata[x - 1, y - 1] == 255:
                    count = count + 1
                if count > 4:
                    # print('pos: (' + str(x) + ', ' + str(y) + ')----' + str(count))
                    pixdata[x, y] = 255
    for i in range(h): # leftmost and rightmost columns, second pass
        if pixdata[0, i] == 0 and pixdata[1, i] == 255:
            pixdata[0, i] = 255
        if pixdata[w - 1, i] == 0 and pixdata[w - 2, i] == 255:
            pixdata[w - 1, i] = 255
    for i in range(w): # top and bottom rows, second pass
        if pixdata[i, 0] == 0 and pixdata[i, 1] == 255:
            pixdata[i, 0] = 255
        if pixdata[i, h - 1] == 0 and pixdata[i, h - 2] == 255:
            pixdata[i, h - 1] = 255
    return img
# Validate the format of the OCR-recognized captcha text and compute its arithmetic result
def getCode(data):
    """Parse OCR'd captcha text like "3+4=" and return the arithmetic result.

    :param data: text recognized from the captcha image.
    :return: the computed sum/difference on success, or 404 when the text does
        not match the "digit (+|-) digit [=]" format.

    Also stores the result in the module-level ``verycode`` for callers that
    read it globally.
    """
    global verycode
    # Raw string: '\d' and '\+' are invalid escape sequences in a plain
    # string literal (DeprecationWarning, SyntaxWarning on newer Pythons).
    pattern = r'\d[\+\-]\d=?'
    if re.match(pattern, data):
        strs = list(data)
        # The operator can only be one of '+'/'-', so the branches are
        # mutually exclusive.
        if strs[1] == '+':
            verycode = int(strs[0]) + int(strs[2])
        elif strs[1] == '-':
            verycode = int(strs[0]) - int(strs[2])
        print("验证码计算结果为:", verycode)
        return verycode
    else:
        print("验证码校验格式不匹配,正在重试获取验证码")
        return 404
def getRealCode(image):
    """Run the captcha pipeline: preprocess the image, OCR it, compute the value.

    :param image: grayscale PIL-style image of the captcha.
    :return: the computed captcha value on success, or 404 when OCR fails or
        the recognized text has an unexpected format.
    """
    # Baidu OCR API credentials.
    # NOTE(review): hard-coded credentials should live in configuration.
    APP_ID = '23849653'
    API_KEY = '3AH9H5ejMnhFTX1sM4bk9P03'
    SECRET_KEY = 'vytkw76cbWDnzOUvOFWdqVD8dLXGGr66'
    ocr = AipOcr(APP_ID, API_KEY, SECRET_KEY)
    image = binarizing(image)  # binarize
    image = del_other_dots(image)  # denoise
    image.save("./file/image.png")  # persist the cleaned image
    # Read the image back as raw bytes for the OCR request; the context
    # manager fixes the file-handle leak of the original open()/read() with
    # no close().
    with open(r'./file/image.png', 'rb') as f:
        img = f.read()
    # res = ocr.basicGeneral(img)  # standard-precision recognition
    res = ocr.basicAccurate(img)  # high-precision recognition
    # Walk the recognition result.
    if res.get('words_result'):
        result = res.get('words_result').__getitem__(0)
        json_str = json.dumps(result, sort_keys=True)
        params_json = json.loads(json_str)
        items = params_json.items()
        for key, value in items:
            print('文字识别结果:', str(value))
            result = getCode(str(value))  # validate the text and compute the value
        return result
    else:
        print('文字识别失败,正在重试获取验证码')
        return 404
| [
"wh775349543@gmail.com"
] | wh775349543@gmail.com |
ac7445a68c219f3dd9d06e99c7644d3df11f37fd | 43fa572169fb7f07e360afcbdde393abc0508b14 | /GOLsparseMatrix.py | 876f998a7e04faa0e3e52f583ddf5873cd6ce92b | [] | no_license | ankitamishra1998/GameofLife | c445674f4d3cd56e8eb6c040da5ff0a001e329e8 | 8732ab05569da63344ca3df071e0c9b82fbf44f8 | refs/heads/master | 2021-05-10T13:46:43.098566 | 2018-01-25T05:30:17 | 2018-01-25T05:30:17 | 118,489,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,154 | py | print ("Game of Life")
import sys
import time
import tkinter
from tkinter import *
import numpy as np
from scipy.sparse import csr_matrix
def count(arr, tup, d, x):
    """Return x plus one if tup lies inside the d x d grid and arr[tup] == 1."""
    r, c = tup[0], tup[1]
    if 0 <= r < d and 0 <= c < d:
        if arr[tup] == 1:
            x += 1
    return x
##row = [1,0,1,2,2]
##col = [0,2,2,1,2]
##data = [1,1,1,1,1]
##d = 5
##m = max(max(row), max(col)) + 1
##tup = [(1,0), (0,2), (1,2), (2,1), (2,2)]
##arr = csr_matrix((data, (row, col)), shape=(m,m)).toarray()
##row = [1,1,1]
##col = [0,1,2]
##data = [1,1,1]
##d = 3
##m = max(max(row), max(col)) + 1
##print(m)
##tup = [(1,0), (1,1), (1,2)]
##arr = csr_matrix((data, (row, col)), shape=(m,m)).toarray()
def count_neighbours(i, j, row, col, tup):
    """Count how many of the 8 cells adjacent to (i, j) appear in tup.

    row and col are accepted for signature compatibility but unused.
    """
    offsets = ((-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1))
    return sum((i + di, j + dj) in tup for di, dj in offsets)
##c = count_neighbours(-1, 0, row, col, tup)
##print(c)
##tup_del = []
##for i in range(m):
## for j in range(m):
## count = count_neighbours(i, j, row, col, tup)
## if count == 3:
## tup_del.append((i,j))
##
##print(tup_del)
def start_game():
    """Prompt for live-cell coordinates on stdin, then run the simulation.

    Row/column pairs are read until 'q' is entered; the starting board is
    printed via a sparse matrix and 20 generations are played from it.
    """
    row = []
    col = []
    data = [1]
    # Collect (row, col) pairs until the user quits with 'q'.
    while 1:
        r = input("Life Cell - row: ")
        if r == 'q': break
        else:
            row += [int(r)]
        c = input("Life Cell - col: ")
        if c == 'q': break
        else:
            col += [int(c)]
    d = len(row)
    data *= d  # one '1' entry per live cell
    m = max(max(row), max(col)) + 1  # smallest board edge containing all cells
    # NOTE(review): the matrix is built with shape (d, d) -- the number of
    # live cells -- not (m, m); coordinates >= d would raise here.  Confirm
    # whether shape=(m, m) was intended.
    arr = csr_matrix((data, (row, col)), shape=(d,d)).toarray()
    print(arr)
    tup_arr = []
    for i in range(d):
        tup_arr.append((row[i],col[i]))
    print(tup_arr)
    g = 20  # number of generations to simulate
    generation(tup_arr, m, data, row, col, g)
def create_grid(live_tup, d, row, col):
    """Render a d x d board with tkinter: live cells show "|||", dead cells blank.

    row and col are accepted but unused.  Blocks inside tkinter's mainloop()
    until the window is closed.
    """
    all_tup = []
    for i in range(d):
        for j in range(d):
            all_tup.append((i,j))
    for k in all_tup:
        if k in live_tup:
            t = "|||"
        else:
            t = ""
        Label(text=t, relief=RIDGE, width=5).grid(row=k[0], column=k[1])
    mainloop()
def generation(tup, m, data, row, col, g):
    """Advance the Game of Life one step and recurse for g generations.

    :param tup: list of live-cell (row, col) tuples; mutated in place.
    :param m: current board extent; cells are scanned over [-1, m].
    :param data: sparse-matrix values; rebuilt each generation.
    :param row: live-cell row indices; rebuilt each generation.
    :param col: live-cell column indices; rebuilt each generation.
    :param g: generations remaining.

    Exits the process with "Game Over" when no cells remain.  Each step opens
    a tkinter window via create_grid(), which blocks until closed.
    """
    tup_del = []   # cells that die this step (fewer than 2 or more than 3 neighbours)
    tup_live = []  # cells born this step (exactly 3 neighbours)
    for i in range(-1, m + 1):
        for j in range(-1, m + 1):
            count = count_neighbours(i, j, row, col, tup)
            if count < 2 or count > 3:
                tup_del.append((i,j))
            elif count == 3:
                tup_live.append((i,j))
    # Apply deaths, then births; cells with 2-3 neighbours survive untouched.
    for r in tup_del:
        if r in tup:
            tup.remove(r)
    for a in tup_live:
        if a not in tup:
            tup.append(a)
    # Rebuild the coordinate lists for the next generation.
    row = []
    col = []
    for n in tup:
        row.append(n[0])
        col.append(n[1])
    data = [1]*len(row)
    d = len(row)
    if d == 0:
        print("Game Over")
        sys.exit()
    m = max(max(row), max(col)) + 1
    arr = csr_matrix((data, (row, col)), shape=(m+2,m+2)).toarray()
    print(arr)
    create_grid(tup, len(row), row, col)
    if g > 1:
        generation(tup, m, data, row, col, g-1)
start_game()
| [
"ankitamishra1998@gmail.com"
] | ankitamishra1998@gmail.com |
beb8c8d74b03478d2ab23ab25ba45c5ef6828866 | 63adbb3123e8cdc9afa46075a16e9ec45bba55c9 | /assignment9.py | c87338f65e9e121b786fe7596d56544cf8352142 | [] | no_license | ranveer1691/python | db1fd275721158d72633fa9d22785f35b2e33ec6 | ceafa13494f9cee85ec72e64a0c7dab504d80c7d | refs/heads/master | 2020-03-26T17:13:47.282600 | 2018-09-07T18:55:27 | 2018-09-07T18:55:27 | 145,149,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | # ASSIGNMENT-9
#Question-1
try:
a = 3
a < 4
a = a / (a - 3)
except ZeroDivisionError as msg:
print('wrong', msg)
#Question-2
try:
l = [1, 2, 3]
print(l[3])
except IndexError as msg:
print('something went wrong', msg)
#Question-3
'''
NameError:hi there
'''
#Question-4
# output
'''
-5
a/b result in 0
'''
#Question-5
try:
l = [1, 2, 3]
print(l[3])
except IndexError as msg:
print('Index errror occurs in list', msg)
# Value Error-
try:
a = int(input('enter a number'))
print(a + 2)
except ValueError as msg:
print('Value error occurs in a list', msg)
# Import Error-
try:
import abcde
except ImportError as msg:
print(msg)
| [
"noreply@github.com"
] | noreply@github.com |
966fdf7eb6a1eec1702eaaaca18fe580ed0748ef | a7870a13babdcac3b680980edb516591a4a4fb07 | /shared/inspect.py | 70ee76c020289fa67c6b547a213a6657bad2835e | [] | no_license | mustafaghali/clouds_dist | 2cc6da2fcbfc1c3c61005ea8316ae31208879f5b | 57d5be4e15c3d4171fda7a58832225eb289f3b8c | refs/heads/master | 2022-04-08T19:00:37.981332 | 2020-02-10T15:12:50 | 2020-02-10T15:12:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,519 | py | #!/usr/bin/env python
"""
Utilities to Study Model Results
"""
from src.data import get_loader, get_transforms
from src.stats import get_stats
from src.utils import get_opts
import argparse
import numpy as np
import pandas
import pathlib
import seaborn as sns
import src.gan as gan
import torch
def infer(model, loader, model_opts, M=1000):
    """
    Predictions on a Subset
    :param model: A torch GAN-style model whose generator is exposed as
      model.g, used for making predictions.
    :param loader: A torch dataloader object; each batch must carry the
      "metos" entry consumed by get_noisy_input_tensor.
    :param model_opts: Options object with the Cin / Cnoise channel counts
      used to build the noisy generator input.
    :param M: The maximum number of batches to go through.
    :return y_hat: A 4D torch tensor, with dimensions corresponding to, sample
      x channel x width x height.
    """
    result = []
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    # Cap the batch count by the loader size (note: float division).
    M = min(len(loader.dataset) / loader.batch_size, M)
    for m, batch in enumerate(loader):
        if m > M: break
        print(f"Inferring batch {m}/{M}")
        x = get_noisy_input_tensor(batch, model_opts)
        y_hat = model.g(x.to(device))
        result.append(y_hat.detach().cpu())
    return torch.cat(result)
def histogram(x, sample_frac=0.3, out_dir=None):
    """
    Values from Random Tensor Indices
    :param x: A torch tensor of arbitrary dimension
    :param sample_frac: A float specifying what fraction of indices to include
      in the histogram
    :param out_dir: The directory to save the figure (defaults to the current
      working directory).
    :return None, but saves histogram.png in `out_dir`
    """
    if not out_dir:
        out_dir = pathlib.Path.cwd()
    x = x.numpy().flatten()
    indices = np.random.choice(len(x), int(len(x) * sample_frac))
    # distplot returns a matplotlib Axes; save through its parent Figure.
    # The original `.figuresavefig` was a typo that raised AttributeError
    # before any figure was written.
    sns.distplot(x[indices]).figure.savefig(pathlib.Path(out_dir, "histogram.png"))
def y_scatter(y, y_hat, sample_frac=0.3, out_dir=None):
    """
    Scatterplot of y vs. y_hat
    :param y: Pandas data frame of raw output, as saved by save_iterator
    :param y_hat: Pandas data frame of raw predictions, as saved by
      save_iterator
    :param sample_frac: Proportion of pixels (across w x h x c) to keep when
      plotting
    :param out_dir: The directory to save the outputs to (defaults to the
      current working directory); the figure is written as scatterplot.png.
    """
    if not out_dir:
        out_dir = pathlib.Path.cwd()
    # Flatten both frames and sample one shared set of random pixel indices.
    y, y_hat = y.values.flatten(), y_hat.values.flatten()
    indices = np.random.choice(len(y), int(len(y) * sample_frac))
    p = sns.jointplot(
        y[indices],
        y_hat[indices],
        color="black",
        kind="hex",
        bins=400,
        gridsize=50
    )
    p.set_axis_labels('y', 'y_hat', fontsize=16)
    p.savefig(pathlib.Path(out_dir, "scatterplot.png"))
def save_line(z, f, round_level=4):
    """
    1D Array -> String for line in CSV
    """
    values = np.round(z.flatten().numpy(), round_level).astype(str)
    f.write(",".join(values))
    f.write("\n")
def get_noise_tensor(model_opts, shape):
    """Functional version of method in src/train.py"""
    batch, height, width = shape[0], shape[2], shape[3]
    total_channels = model_opts.Cin + model_opts.Cnoise
    noise = torch.FloatTensor(batch, total_channels, height, width)
    noise.uniform_(-1, 1)
    return noise
def get_noisy_input_tensor(batch, model_opts):
    """Build generator input: a noise tensor whose first Cin channels are the metos."""
    noisy = get_noise_tensor(model_opts, batch["metos"].shape)
    noisy[:, : model_opts.Cin, :, :] = batch["metos"]
    return noisy
def loader_gen(loader, key="metos"):
    """
    Wrapper for Loaders
    """
    for _, batch in enumerate(loader):
        items = batch[key]
        for idx in range(len(items)):
            yield items[idx]
def tensor_gen(z):
    """
    Tensor -> Iterator
    """
    for idx in range(len(z)):
        yield z[idx]
def save_iterator(iterator, out_path="x.csv", crop_ix=None, M=1000):
    """
    Save Iterator to File Incrementally

    :param iterator: yields tensors; each one is written as one CSV line via
      save_line().
    :param out_path: destination CSV path (overwritten on each call).
    :param crop_ix: optional (lo, hi) pair; when given, each sample is sliced
      to [lo:hi] over its dimensions 1 and 2 before writing.
    :param M: maximum number of samples to write.
    """
    with open(out_path, "w") as f:
        for m, sample in enumerate(iterator):
            if m > M: break
            print(f"Extracting batch {m} [at most {M} will be saved]")
            cropped = sample
            if crop_ix:
                cropped = cropped[:, crop_ix[0]:crop_ix[1], crop_ix[0]:crop_ix[1]]
            save_line(cropped, f)
def loader_from_run(opts_path, data_path=None):
    """
    Get Loader (with transforms) from Experiment

    :param opts_path: accepted but currently unused -- the body reads an
      `opts` name from the enclosing scope instead.  NOTE(review): confirm
      whether this should be `opts = get_opts(opts_path)`.
    :param data_path: optional override for opts["data"]["path"]; when absent
      the recorded original_path is restored.
    :return: the data loader built from the transformed, stat-normalized data.
    """
    if not data_path:
        opts["data"]["path"] = opts["data"]["original_path"]
    else:
        opts["data"]["path"] = data_path
    print("getting transforms")
    transfs = get_transforms(opts)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # NOTE(review): `device` is computed but never used in this function.
    print("getting stats")
    stats = get_stats(opts, transfs)
    loader, _ = get_loader(opts, transfs, stats)
    return loader
def model_from_run(opts, checkpoints_dir, model_name):
    """
    Get Model from Experiment

    :param opts: experiment options dict holding the model's channel sizes
      under opts["model"].
    :param checkpoints_dir: directory containing saved checkpoints.
    :param model_name: checkpoint filename to load (e.g. "state_latest.pt").
    :return: a gan.GAN instance with the checkpoint's state_dict loaded.
    """
    model_path = pathlib.Path(checkpoints_dir, model_name)
    state = torch.load(model_path)["state_dict"]
    model = gan.GAN(
        opts["model"]["Cin"],
        opts["model"]["Cout"],
        opts["model"]["Cnoise"],
        bottleneck_dim=opts["model"]["bottleneck_dim"]
    )
    model.load_state_dict(state)
    return model
if __name__ == '__main__':
    # get file arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--conf_path",
        type=str,
        default= "/scratch/sankarak/clouds/regression-run--3/run_0/short-run-cropped.yaml",
        help="The full path to the configuration file in the experiment that you want to analyze."
    )
    parser.add_argument(
        "-m",
        "--model_pt",
        type=str,
        default= "state_latest.pt",
        help="The name of the checkpoint whose predictions you want to study"
    )
    args = parser.parse_args()
    opts = get_opts(args.conf_path)
    # get the model and loader
    checkpoints_dir = pathlib.Path(pathlib.Path(args.conf_path).parent, "checkpoints")
    model = model_from_run(opts, checkpoints_dir, args.model_pt)
    loader = loader_from_run(opts)
    # make predictions and summarize
    y_hat = infer(model, loader, opts["model"])
    save_iterator(tensor_gen(y_hat), "y_hat.csv")
    save_iterator(loader_gen(loader, "real_imgs"), "y.csv")
    save_iterator(loader_gen(loader, "metos"), "x.csv", (50, 60))
    # make some plots: read back a random subset of pixel columns
    one_row = next(tensor_gen(y_hat)).numpy().flatten()
    usecols = np.random.choice(range(len(one_row)), 2000, replace=False)
    # This module imports `pandas` without the `pd` alias, so the reads must
    # use the full module name; the original `pd.read_csv` raised NameError.
    y = pandas.read_csv("y.csv", header=None, usecols=usecols, names=range(len(one_row)))
    y_hat = pandas.read_csv("y_hat.csv", header=None, usecols=usecols, names=range(len(one_row)))
    y_scatter(y, y_hat)
| [
"sankaran.kris@gmail.com"
] | sankaran.kris@gmail.com |
278c8f0ddb3c42fcfe4c7fc1b142419697180d07 | dd81260c780c3697bb98b5e93f268eed5ebbed71 | /mysite/settings.py | 185358c81c7684816b6fe53f73df0c3437efc548 | [] | no_license | josecd85/my-first-blog | 08a00adae0dd6cda047b09d53a56d92430e24e13 | 5f24ae1cfbc94e6d5f4a19875560381faeea1f0f | refs/heads/master | 2021-06-25T13:28:27.968413 | 2020-11-13T12:15:06 | 2020-11-13T12:15:06 | 169,381,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,442 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^eun^gr-1q(sap3w^o(9f^qc2xfldepfw32!-6x)h==jwe@cgm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'josecd85.pythonanywhere.com',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'ddd',
# 'crispy_forms',
]
# CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['C:/virtualEnv/blog/templates/blog'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
""" 'default': {
'ENGINE': 'django.db.backends.oracle',
'NAME': 'CFCORCL',
'USER': 'DJANGO',
'PASSWORD': 'DJANGO',
        'HOST': 'ENDESA-EVXFBDR2',
'PORT': '1521'
}
"""
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'es-es'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/home'
# Logging
"""
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',
'style': '{',
},
'simple': {
'format': '{levelname} {message}',
'style': '{',
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
# 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
""" | [
"joscabdor@gmail.com"
] | joscabdor@gmail.com |
f69ba9cebdceefbbc74e3b1897f434dfd5b5792d | 88492694761b3a4a09a612b13e62b1339ba5e69c | /src/search/loader.py | 781fa5e9b5c61ab6a5c9839f896e188ac36c234e | [
"MIT"
] | permissive | igarashi339/disney-network | cff257704b932cadc67fd78e659a994ce8ce74e5 | 76d3a628add4d268d2b8864c978bf9a081d4be70 | refs/heads/main | 2023-09-03T12:55:27.937299 | 2021-11-07T13:23:36 | 2021-11-07T13:23:36 | 357,179,122 | 2 | 0 | MIT | 2021-10-02T11:15:25 | 2021-04-12T12:08:45 | Python | UTF-8 | Python | false | false | 1,322 | py | import json
class Loader:
def __init__(self, input_data_path):
self.spots_json_path = input_data_path + "spots.json"
self.links_json_path = input_data_path + "links.json"
self.nodes_json_path = input_data_path + "nodes.json"
def get_nearest_node_id(self, spot_id_org, spot_id_dst):
org_node_id = -1
dst_node_id = -1
with open(self.spots_json_path, "r", encoding="utf-8") as f:
json_data = json.load(f)
for spot in json_data["spots"]:
if spot_id_org == spot["spot-id"]:
org_node_id = spot["nearest-node-id"]
if spot_id_dst == spot["spot-id"]:
dst_node_id = spot["nearest-node-id"]
return org_node_id, dst_node_id
def get_nodes(self):
with open(self.nodes_json_path, "r", encoding="utf-8") as f:
json_data = json.load(f)
return json_data["nodes"]
def get_links(self):
with open(self.links_json_path, "r", encoding="utf-8") as f:
json_data = json.load(f)
return json_data["links"]
def get_spots(self):
with open(self.spots_json_path, "r", encoding="utf-8") as f:
json_data = json.load(f)
return json_data["spots"] | [
"igarashi339@gmail.com"
] | igarashi339@gmail.com |
5bf3b709d650e67287d4bfbbb1887821906461cd | fd1b7ffd30143dc0aa0918cab6107f1013b8c4ea | /series/migrations/0005_auto__add_language__add_userserial.py | c48926f92a8931ecabc44c70ec119af1855ae306 | [] | no_license | apelliciari/watch-the-series | 4861ea9a063a7e6cda25feaf0f9a4a996cca20a7 | cec2b92edd1439f81a529685301bdfed2eb991ee | refs/heads/master | 2021-01-22T07:10:38.818076 | 2013-05-16T16:25:29 | 2013-05-16T16:25:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,929 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Language'
db.create_table(u'language', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('iso', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True, blank=True)),
))
db.send_create_signal(u'series', ['Language'])
# Adding model 'UserSerial'
db.create_table(u'user_serial', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='xserials', to=orm['series.User'])),
('serial', self.gf('django.db.models.fields.related.ForeignKey')(related_name='xusers', to=orm['series.Serial'])),
('language', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['series.Language'])),
('completed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True, blank=True)),
))
db.send_create_signal(u'series', ['UserSerial'])
def backwards(self, orm):
# Deleting model 'Language'
db.delete_table(u'language')
# Deleting model 'UserSerial'
db.delete_table(u'user_serial')
models = {
u'series.language': {
'Meta': {'object_name': 'Language', 'db_table': "u'language'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'series.season': {
'Meta': {'object_name': 'Season', 'db_table': "u'season'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'episode_number': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seasons'", 'to': u"orm['series.Serial']"}),
'thetvdb_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'series.serial': {
'Meta': {'object_name': 'Serial', 'db_table': "u'serial'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imdb_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'thetvdb_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'thetvdb_last_updated': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'series.user': {
'Meta': {'object_name': 'User'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'medaglie': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['series.Season']", 'through': u"orm['series.UserSeason']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'series.userseason': {
'Meta': {'object_name': 'UserSeason', 'db_table': "u'user_season'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_episode_seen': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'last_episode_unfinished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'season': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xusers'", 'to': u"orm['series.Season']"}),
'season_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xseasons'", 'to': u"orm['series.User']"})
},
u'series.userserial': {
'Meta': {'object_name': 'UserSerial', 'db_table': "u'user_serial'"},
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['series.Language']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xusers'", 'to': u"orm['series.Serial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xserials'", 'to': u"orm['series.User']"})
}
}
complete_apps = ['series'] | [
"isac.newton@gmail.com"
] | isac.newton@gmail.com |
05890e0e58f21f43e1df0280cae5ee8aba8974ad | 79e85494418b7236ab4519f35ee6a22349b3c531 | /TensorflowMNIST/tfMNIST.py | 4056e442d3bd80dd565a0fc8a2a8fe5c77f3f1d3 | [
"MIT"
] | permissive | raghavgupta0296/Learning-Deep-Learning-Libraries | 1e3014b88c7db7fc7a10957ade724d3de1fceef4 | 15cd020f1468d47bc9e09bcc12933a6aa8004792 | refs/heads/master | 2021-06-14T04:39:21.862036 | 2017-03-21T05:48:44 | 2017-03-21T05:48:44 | 68,845,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
mnist = input_data.read_data_sets('MNIST_data',one_hot=True)
# print (mnist)
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32,shape=[None,784])
y = tf.placeholder(tf.float32,shape=[None,10])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
pred = tf.matmul(x,W) + b
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred,y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
correctPred = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
acc = tf.reduce_mean(tf.cast(correctPred,tf.float32))
for i in range(1000):
batch = mnist.train.next_batch(100)
train_step.run(feed_dict={x:batch[0],y:batch[1]})
print (acc.eval(feed_dict={x:mnist.test.images,y:mnist.test.labels}))
| [
"noreply@github.com"
] | noreply@github.com |
29a430dbc328601d477386f8cabc243e74005619 | 5df9c5d9f84198f71dc4d6395f60f9735745ac74 | /una_sym_util.py | d2ef2ebd352cd6f4319c2c3e558de5c2ecdf8cbf | [] | no_license | natteruw/crystal_pymolscripts | 545843209397ff6668666df5c4a538e85761499a | 19cb06dd247a0c5727eca6a6c6408f75ed192101 | refs/heads/master | 2021-01-19T11:48:39.268407 | 2018-03-01T22:15:09 | 2018-03-01T22:15:09 | 87,993,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,974 | py | # -*- mode:python;tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
import sys,os,inspect,functools
newpath = os.path.dirname(inspect.getfile(inspect.currentframe())) # script directory
if not newpath in sys.path: sys.path.append(newpath)
import string,re,gzip,itertools
import collections
from pymol_util import *
import operator as op
from xyzMath import *
from itertools import product,ifilter
from cluster import HierarchicalClustering
nsymmetrizecx = 0
def get_xforms_by_chain(sele="all",verbose=False,userms=False):
	"""Return ({(chain1, chain2): Xform}, max_rms) for every chain pair in sele.

	Frames are taken about the CA center of mass of the selection.  When
	userms is true the rms-aligned relative frame is used and the worst rms
	over all pairs is tracked; otherwise rms is reported as 0.
	NOTE(review): `verbose` is accepted but unused in this function.
	"""
	print "Hi, this is sym_util's get_xforms_by_chain"
	v = cmd.get_view()
	cen = com("("+sele+") and (name CA and not HET)")
	chains = cmd.get_chains(sele)
	xforms = dict()
	maxrms = 0.0
	for c1,c2 in filter( lambda t: True, product(chains,chains) ):
		refsele = "((%s) and chain %s and name CA and not HET)"%(sele,c1)
		movsele = "((%s) and chain %s and name CA and not HET)"%(sele,c2)
		if userms: x,rms = getrelframe_rmsalign( movsele, refsele, Xform(-cen) )
		else: x,rms = getrelframe( movsele, refsele, Xform(-cen)), 0.0
		maxrms = max(maxrms,rms)
		xforms[c1,c2] = x
		#if c1 in "AB" and c2 in "AB":
		#	print movsele
		#	print refsele
		#	print x.pretty()
		#	print
		#	raise Exception
	cmd.set_view(v)
	return xforms, maxrms
def find_symelems(sele_or_xforms="all",verbose=False):
	"""Detect candidate cyclic symmetry elements among chain-pair transforms.

	Accepts either a pymol selection string (chain transforms are computed
	via get_xforms_by_chain) or a precomputed {(c1,c2): Xform} dict.
	Returns (centers, maxrms, maxangerr, maxaxiserr), where each center is
	[nfold, axis, [chainpairs...], angerr].

	NOTE(review): when a dict is passed directly, `maxrms` is never assigned
	and `cen_of_geom` concatenates the dict into a selection string -- both
	would raise; confirm the dict code path is actually used.
	"""
	print "Hi, this is sym_util's find_symelems"
	xforms = sele_or_xforms
	if isinstance(sele_or_xforms,basestring): xforms, maxrms = get_xforms_by_chain(sele_or_xforms,verbose=True)
	elif not isinstance(sele_or_xforms,dict): raise ValueError
	symelems = list()
	maxangerr = 0.0
	for c,x in xforms.items():
		assert len(c)==2
		assert isinstance(x,Xform)
		if c[0]==c[1]: continue
		dis = x.t.length()
		# only near-pure rotations (small translation) can be symelems
		if dis > 5.0: continue
		axis,ang = x.rotation_axis()
		nfold = round(math.pi*2.0/ang)
		angerr = abs(ang-math.pi*2.0/nfold)*180.0/math.pi
		if verbose: print "candidate symelem:",nfold, c, angerr, axis
		if angerr > 360.0/nfold/8.0: continue # require unambiguous symelems
		maxangerr = max(maxangerr,angerr*nfold)
		symelems.append( (nfold,axis,c,angerr) )
	# distance between symelems = angle between axes, infinite if nfold differs
	symelemdis = lambda x,y: line_line_angle_degrees(x[1],y[1]) if x[0]==y[0] else 9e9
	if verbose:
		for se1,se2 in filter( lambda t: t[0]<t[1], product(symelems,symelems) ):
			if se1[0]==se2[0]:
				print se1
				print se2
				print symelemdis(se1,se2), "degrees"
				print
	# cluster elements whose axes agree to within `thresh` degrees
	hier = HierarchicalClustering(symelems, symelemdis )
	thresh = 6.0
	clusters = hier.getlevel(thresh);
	print "number of symmetry element clusters at threshold",thresh,"degrees is" , len(clusters)
	centers0 = list()
	maxaxiserr = 0.0
	for clust in clusters:
		print "symelem cluster:",clust
		# average the member axes (sign-corrected) into a cluster center
		center = list(clust[0])
		center[2] = list((center[2],))
		for i in range(1,len(clust)):
			ax = clust[i][1]
			center[1] = center[1] + ( ax if ax.dot(center[1]) > 0 else -ax )
			center[2].append(clust[i][2])
			center[3] = max(center[3],clust[i][3])
		center[1].normalize()
		centers0.append(center)
		axiserr = 0.0
		for c in clust: axiserr = max( axiserr, 1.0-abs(center[1].dot(c[1])) )
		maxaxiserr = max(maxaxiserr,axiserr)
	# sort on nfold, then on number of chain pairs in cluster
	centers0 = sorted( centers0, cmp = lambda x,y: cmp(y[0],x[0]) if x[0]!=y[0] else cmp(len(y[2]),len(x[2])) )
	centers = list()
	# prune centers that are symmetry multiples of an already-kept axis
	for center in centers0:
		if verbose: print "DEBUG prune center:",center
		seenit = False
		for censeen in centers:
			remainder = abs( ( censeen[0] / center[0] ) % 1.0)
			if verbose: print " ",remainder,censeen
			if remainder > 0.01: continue # not a symmetry multiple
			if 1.0-abs(center[1].dot(censeen[1])) < 0.01:
				seenit = True # axis are same
		if not seenit:
			centers.append(center)
	print "centers:"
	cen_of_geom = com("("+sele_or_xforms+") and (name CA and not HET)")
	for center in centers:
		print center
		# if center[0]>2.1: continue
		#showvecfrompoint(50*center[1],cen_of_geom)
	return centers, maxrms, maxangerr, maxaxiserr
def guessdxaxes(sele="all",verbose=False):
	"""Guess the high-order and 2-fold axes of a Dx (dihedral) assembly.

	Assumes `sele` has 2*nfold chains. Returns
	(axis_high, axis_low, maxrms, angerr, axiserr).
	"""
	print "Hi, this is sym_util's guessdxaxes"
	nfold = len(cmd.get_chains(sele))
	# NOTE(review): `% 2 is 0` relies on CPython small-int identity; works
	# here but `== 0` would be the safe spelling
	assert nfold % 2 is 0
	nfold /= 2
	symelems, maxrms, angerr, axiserr = find_symelems(sele,verbose=verbose)
	for s in symelems: print s
	assert len(symelems) > 1
	# expect the dominant element to be the nfold axis, then a 2-fold
	assert symelems[0][0] == float(nfold)
	assert symelems[1][0] == float(2)
	axis_high = symelems[0][1]
	axis_low = symelems[1][1]
	return axis_high, axis_low, maxrms, angerr, axiserr
def aligndx(sele='all',verbose=False):
	"""Center a Dx assembly and align its high-order axis to Z and a 2-fold
	axis to X. Returns (maxrms, angerr, axiserr) from axis detection."""
	trans(sele,-com(sele+" and name CA and not HET"))
	haxis, laxis, maxrms, angerr, axiserr = guessdxaxes(sele,verbose=verbose)
	xalign = alignvectors( haxis, laxis, Uz, Ux )
	xform(sele,xalign)
	return maxrms, angerr, axiserr
# module-load debug print left over from development
print "Hello again!"
def trim_sym(sel='all',na=1,nb=1): ##trim_sym('visible',3,3)
	"""Remove all but the first 1/na of chain A's residues and the first
	1/nb of chain B's residues from `sel` (py2 integer division)."""
	a = [x[1] for x in getres(sel + " and chain A")]
	b = [x[1] for x in getres(sel + " and chain B")]
	for ia in range( len(a)/na, len(a) ):
		cmd.remove( sel + " and chain A and resi " + str(a[ia]) )
	for ib in range( len(b)/nb, len(b) ):
		cmd.remove( sel + " and chain B and resi " + str(b[ib]) )
# expose as a pymol command
cmd.extend('trim_sym',trim_sym)
def guesscxaxis(sele,nfold=None,chains0=list(),extrasel="name CA"):
sele = "(("+sele+") and ("+extrasel+") and (not het))"
check = False
if not chains0:
chains0.extend(cmd.get_chains(sele))
check = True
if not nfold:
nfold = len(chains0)
check = True
# print chains0
if check and len(chains0) != nfold:
print chains0
print "num chains != n-fold"
return None
print "chains0:", chains0
chains = list()
for i,c in enumerate(chains0):
if isinstance(c,basestring):
chains.append( (c,) )
elif isinstance(c,collections.Iterable):
chains.append( c )
else:
raise ValueError("chain must be string or list of strings")
atoms = cmd.get_model(sele).atom
chain_index = {}
for i,clist in enumerate(chains):
for c in clist:
chain_index[c] = i
coords = [list() for c in chains]
print len(coords),[len(x) for x in coords]
for a in atoms:
if a.chain in chain_index:
coords[chain_index[a.chain]].append(Vec(a.coord))
for c in coords:
print len(c)
return cyclic_axis(coords)
def aligncx(sele,nfold,alignsele=None,tgtaxis=Uz,chains=None,extrasel="name CA"):
	"""Align the guessed Cx axis of `sele` onto `tgtaxis` (default Z).

	Returns the (axis,cen,diserr,angerr) tuple from guesscxaxis, or None
	if axis detection failed.

	Bug fix: `chains` previously defaulted to a shared mutable list that
	accumulated chain ids across calls; it now defaults to None and a
	fresh list is created per call.
	"""
	if chains is None: chains = list()
	if not alignsele: alignsele = sele
	tmp = guesscxaxis(alignsele,nfold,chains,extrasel)
	if not tmp: return None
	axis,cen,diserr,angerr = tmp
	# trans(sele,-cen)
	alignaxis(sele,tgtaxis,axis,xyz.Vec(0,0,0))
	return tmp
# def alignd2(sele='all',chains=list()):
# alignsele = "(("+sele+") and (name CA))"
# if not chains: chains.extend(cmd.get_chains(alignsele))
# if 4 is not len(chains): raise NotImplementedError("D2 must have chains")
# ga1 = guesscxaxis( alignsele, 2,[ (chains[0],chains[1]), (chains[2],chains[3]) ] )
# ga2 = guesscxaxis( alignsele, 2,[ (chains[0],chains[2]), (chains[1],chains[3]) ] )
# assert ga1 is not None and ga2 is not None
# err = 90.0 - line_line_angle_degrees(ga1[0],ga2[0])
# x = alignvectors(ga1[0],ga2[0],Uz,Uy)
# xform(sele,x)
# trans(sele,-com(alignsele))
# return err
def symmetrize(sele="not symmetrized_*",alignsele=None,chains=list(),delete=True):
global nsymmetrizecx
if delete: cmd.delete("symmetrized_*")
tmp = guesscxaxis(sele,None,chains)
if not tmp: return None
axis,cen,diserr,angerr = tmp
# print "symmetrize TMP__C%i, distance err %f, algle error %f"%(len(chains),diserr,angerr)
for i,c in enumerate(chains):
newobj = "symmetrized_%i_%s"%(nsymmetrizecx,c)
cmd.create(newobj,"(%s) and chain %s"%(sele,chains[0]))
cmd.alter(newobj,"chain='%s'"%c)
rot(newobj,axis,360.0*float(i)/float(len(chains)),cen)
print "rot(",newobj,',',axis,',',360.0*float(i)/float(len(chains)),',',cen,')'
newobj = "symmetrized_%i"%(nsymmetrizecx)
cmd.create(newobj,"symmetrized_%i_*"%(nsymmetrizecx))
cmd.delete("symmetrized_%i_*"%nsymmetrizecx)
# print "rms",cmd.align(newobj,sele)
nsymmetrizecx += 1
return tmp
# One-argument conveniences binding the n-fold of guesscxaxis()/aligncx().
guessc2axis = functools.partial(guesscxaxis,nfold=2)
guessc3axis = functools.partial(guesscxaxis,nfold=3)
guessc4axis = functools.partial(guesscxaxis,nfold=4)
guessc5axis = functools.partial(guesscxaxis,nfold=5)
guessc6axis = functools.partial(guesscxaxis,nfold=6)
alignc2 = functools.partial(aligncx,nfold=2)
alignc3 = functools.partial(aligncx,nfold=3)
alignc4 = functools.partial(aligncx,nfold=4)
alignc5 = functools.partial(aligncx,nfold=5)
alignc6 = functools.partial(aligncx,nfold=6)
def showcxaxis(sele,nfold=None,chains=None,length=30,col=(1,1,1),lbl="Cx Axis"):
	"""Draw the guessed Cx axis of `sele` as a CGO vector of 2*length.

	Bug fix: `chains` previously defaulted to a shared mutable list that
	guesscxaxis() extends in place, leaking state between calls.
	"""
	if chains is None: chains = list()
	g = guesscxaxis(sele,nfold,chains)
	showvecfrompoint(g[0]*2*length,g[1]-g[0]*length,col=col,lbl=lbl)
def myint(s):
	"""Return the integer value of the longest run of digits at the start
	of s, or None if s does not begin with a digit."""
	prefix = ""
	for ch in s:
		if not ch.isdigit():
			break
		prefix += ch
	if not prefix:
		return None
	return int(prefix)
def selbycomp(trn=0):
	"""Select the 8 trimeric (TRI1-8) and 12 dimeric (DIM1-12) components
	of a 24-chain (A-X) two-component assembly, then render a gaussian
	surface and a CGO axis line for each component.

	Bug fix: several chain selector strings had been mangled by an
	over-eager search/replace (M -> "xyz.Mat", V -> "xyz.Vec",
	X -> "Ux"); restored to plain chain identifiers.
	"""
	cmd.select("TRI1","TRI and chain A+B+C")
	cmd.select("TRI2","TRI and chain D+E+F")
	cmd.select("TRI3","TRI and chain G+H+I")
	cmd.select("TRI4","TRI and chain J+K+L")
	cmd.select("TRI5","TRI and chain M+N+O")
	cmd.select("TRI6","TRI and chain P+Q+R")
	cmd.select("TRI7","TRI and chain S+T+U")
	cmd.select("TRI8","TRI and chain V+W+X")
	cmd.select("DIM1","DIM and chain A+D")
	cmd.select("DIM2","DIM and chain B+G")
	cmd.select("DIM3","DIM and chain C+J")
	cmd.select("DIM4","DIM and chain E+U")
	cmd.select("DIM5","DIM and chain F+R")
	cmd.select("DIM6","DIM and chain H+T")
	cmd.select("DIM7","DIM and chain I+O")
	cmd.select("DIM8","DIM and chain K+Q")
	cmd.select("DIM9","DIM and chain L+N")
	cmd.select("DIM10","DIM and chain M+V")
	cmd.select("DIM11","DIM and chain P+W")
	cmd.select("DIM12","DIM and chain X+S")
	cmd.delete("LINE*")
	cmd.delete("serf*")
	cmd.do("""alter all, b=50
	alter all, q=1
	set gaussian_resolution,8""")
	ISO="""map_new map%s, gaussian, 2, %s, 10
	isosurface surf%s, map%s"""
	for i in range(1, 9):
		cmd.do(ISO%(("TRI%i"%i,)*4))
		cmd.color(COLORS[i-1],"surfTRI%i"%i)
		c = com("TRI%i"%i)
		# trans("TRI%i"%i,trn*c.normalized())
		# thin dark cylinder from origin toward the trimer center
		obj = [
			cgo.CYLINDER,
			0.0, 0.0, 0.0,
			1.6*c.x, 1.6*c.y, 1.6*c.z,
			1.5,
			0.1,0.1,0.1,0.1,0.1,0.1,
		]
		cmd.load_cgo(obj,'LINETRI%i'%i)
	for i in range(1,13):
		cmd.do(ISO%(("DIM%i"%i,)*4))
		cmd.color(COLORS[i+7],"surfDIM%i"%i)
		c = com("DIM%i"%i)
		# trans("DIM%i"%i,trn*com("DIM%i"%i).normalized())
		# blue cylinder from origin toward the dimer center
		obj = [
			cgo.CYLINDER,
			0.0, 0.0, 0.0,
			1.3*c.x, 1.3*c.y, 1.3*c.z,
			1.0,
			0,0,1,0,0,1
		]
		cmd.load_cgo(obj,'LINEDIM%i'%i)
def getframe(obj):
	"""Build a coordinate frame (xyz stub) for `obj` from its first, middle
	and last atom positions. Deterministic for identical objects, which is
	what getrelframe() relies on."""
	m = cmd.get_model(obj)
	x = xyz.Vec(m.atom[ 0 ].coord)
	y = xyz.Vec(m.atom[len(m.atom)/2].coord)
	z = xyz.Vec(m.atom[ -1 ].coord)
	frame = xyz.stub(x,y,z)
	# print "getframe:",frame
	return frame
def getrelframe(newobj,refobj,Forigin=None):
	"""get transform between two objects, assume the obj's are identical

	Frames are taken from CA atoms only; Forigin (default identity) lets
	the caller shift the reference origin, e.g. to a center of mass.
	"""
	if Forigin is None: Forigin = xyz.Xform(xyz.Imat,xyz.Vec(0,0,0))
	Fref = Forigin*getframe(refobj+" and name CA")
	Fnew = Forigin*getframe(newobj+" and name CA")
	Fdelta = Fnew * ~Fref
	return Fdelta
def getrelframe_rmsalign(movsel,refsel,Forigin=None):
	"""get transform between two objects using rmsalign

	Both temporaries are created from refsel so they are atom-identical
	(a precondition of getrelframe); one is then rms-aligned onto movsel,
	and the frame delta between the two copies gives the ref->mov
	transform. Returns (Xform, rms).
	"""
	tmpref = "TMP__getrelframe_rmsalign_REF"
	tmpmov = "TMP__getrelframe_rmsalign_MOV"
	cmd.create(tmpref,refsel)
	cmd.create(tmpmov,refsel)
	# cmd.super(tmpref,refsel) # shouldn't be necessary
	alignresult = cmd.align(tmpmov,movsel)
	result = getrelframe(tmpmov,tmpref,Forigin)
	cmd.delete(tmpmov)
	cmd.delete(tmpref)
	return result, alignresult[0]
def rechain(sel,nres):
	"""Reassign chain ids so each consecutive block of `nres` residues gets
	the next ROSETTA_CHAINS letter.

	NOTE(review): the alter selection is "resi %i-%i" without restricting
	to `sel`, so every loaded object is relabeled -- confirm intended.
	"""
	chains = ROSETTA_CHAINS
	ntot = len(getres(sel))
	assert ntot % nres == 0
	for i in range(ntot/nres):
		cmd.alter("resi %i-%i"%( nres*i+1,nres*(i+1)),"chain='%s'"%chains[i])
def makekinwire(sel,movres,fixres):
	"""Propagate `sel` into a 5-copy helical wire: each successive copy is
	aligned so its residue `movres` sits on the previous copy's residue
	`fixres`, visualizing the screw transform between the two residues."""
	v = cmd.get_view()
	cmd.delete("ha"); cmd.create("ha",sel); cmd.alter("ha","chain='A'")
	cmd.delete("hb"); cmd.create("hb",sel); cmd.alter("hb","chain='B'")
	cmd.delete("hc"); cmd.create("hc",sel); cmd.alter("hc","chain='C'")
	cmd.delete("hd"); cmd.create("hd",sel); cmd.alter("hd","chain='D'")
	cmd.delete("he"); cmd.create("he",sel); cmd.alter("he","chain='E'")
	# chain each copy onto the previous one
	cmd.align("hb and resi %i"%movres,"ha and resi %i"%fixres);
	cmd.align("hc and resi %i"%movres,"hb and resi %i"%fixres);
	cmd.align("hd and resi %i"%movres,"hc and resi %i"%fixres);
	cmd.align("he and resi %i"%movres,"hd and resi %i"%fixres);
	util.cbc('elem C')
	v = cmd.set_view(v)
def get_contigs(x,n=7):
	"""Split residue numbers into runs of consecutive integers ("contigs")
	and return those of length >= n.

	x may be a pymol selection string (residue numbers extracted with
	getres) or a list of ints. The input list is not mutated.

	>>> test = list(range(1,8)) + list(range(20,33)) + list(range(40,44)) + list(range(49,50))+ list(range(0,8))
	>>> print test
	[1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 40, 41, 42, 43, 49, 0, 1, 2, 3, 4, 5, 6, 7]
	>>> print get_contigs( test )
	[[1, 2, 3, 4, 5, 6, 7], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32], [0, 1, 2, 3, 4, 5, 6, 7]]
	"""
	if type(x) is type(""):
		x = [x[1] for x in getres(x)]
	contigs = [[]]
	for i in range(len(x)):
		contigs[-1].append(x[i])
		# bug fix: was `x[i]+1 is not x[i+1]` -- identity comparison of
		# ints only behaves like != for CPython's small-int cache (<=256)
		# and wrongly split contigs above that; also no longer appends a
		# sentinel to the caller's list
		if i+1 == len(x) or x[i]+1 != x[i+1]:
			contigs.append(list())
	return [c for c in contigs if len(c) >= n]
# def get_contigs_termini(x,n=7):
# """
# >>> test = list(range(1,8)) + list(range(20,33)) + list(range(40,44)) + list(range(49,50))+ list(range(0,8))
# >>> print test
# [1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 40, 41, 42, 43, 49, 0, 1, 2, 3, 4, 5, 6, 7]
# >>> print get_contigs_termini( test )
# [[1, 2, 3, 4, 5, 6, 7], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32], [0, 1, 2, 3, 4, 5, 6, 7]]
# """
# pass #cend = []
def get_fixed_size_contigs(x,n=7):
	"""Return every length-n window of consecutive residue numbers within
	the contigs found by get_contigs(x) (note: get_contigs is called with
	its own default minimum-length cutoff).

	>>> test = list(range(1,8)) + list(range(20,33)) + list(range(40,44)) + list(range(49,50))+ list(range(0,8))
	>>> print test
	[1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 40, 41, 42, 43, 49, 0, 1, 2, 3, 4, 5, 6, 7]
	>>> for f in get_fixed_size_contigs(test,7): print f
	[1, 2, 3, 4, 5, 6, 7]
	[20, 21, 22, 23, 24, 25, 26]
	[21, 22, 23, 24, 25, 26, 27]
	[22, 23, 24, 25, 26, 27, 28]
	[23, 24, 25, 26, 27, 28, 29]
	[24, 25, 26, 27, 28, 29, 30]
	[25, 26, 27, 28, 29, 30, 31]
	[26, 27, 28, 29, 30, 31, 32]
	[0, 1, 2, 3, 4, 5, 6]
	[1, 2, 3, 4, 5, 6, 7]
	>>> for f in get_fixed_size_contigs(test,9): print f
	[20, 21, 22, 23, 24, 25, 26, 27, 28]
	[21, 22, 23, 24, 25, 26, 27, 28, 29]
	[22, 23, 24, 25, 26, 27, 28, 29, 30]
	[23, 24, 25, 26, 27, 28, 29, 30, 31]
	[24, 25, 26, 27, 28, 29, 30, 31, 32]
	>>> print len(get_fixed_size_contigs(test,1))
	28
	>>> for f in get_fixed_size_contigs(test,4): print f
	[1, 2, 3, 4]
	[2, 3, 4, 5]
	[3, 4, 5, 6]
	[4, 5, 6, 7]
	[20, 21, 22, 23]
	[21, 22, 23, 24]
	[22, 23, 24, 25]
	[23, 24, 25, 26]
	[24, 25, 26, 27]
	[25, 26, 27, 28]
	[26, 27, 28, 29]
	[27, 28, 29, 30]
	[28, 29, 30, 31]
	[29, 30, 31, 32]
	[0, 1, 2, 3]
	[1, 2, 3, 4]
	[2, 3, 4, 5]
	[3, 4, 5, 6]
	[4, 5, 6, 7]
	"""
	return [range(c[i], c[i]+n) for c in get_contigs(x) for i in range(0, len(c)-n+1)]
def tmpname():
	"""Return a pseudo-random pymol object name with a recognizable prefix."""
	return "TEMPORARY_%s" % random.random()
def gen_helical_alignments(sele1,sele2,pref="HALN"):
	"""Generate all pairwise helix-on-helix superpositions of sele2 onto
	sele1, one object per (helix1, helix2) pair, named <pref>_<i>_<j>.

	sele2's chains are relabeled (A->Z, B->Y, ...) so the two copies do
	not collide; helices are the fixed-size contigs of ss H residues.
	"""
	cmd.delete(pref+"_*")
	# mutate everything to ALA so alignments are backbone-driven
	cmd.alter('(%s) or (%s)'%(sele1,sele2),'resn="ALA"')
	cmd.alter(sele2+' and chain A','chain="Z"')
	cmd.alter(sele2+' and chain B','chain="Y"')
	cmd.alter(sele2+' and chain C','chain="X"')
	cmd.alter(sele2+' and chain D','chain="W"')
	cmd.alter(sele2+' and chain E','chain="V"')
	cmd.alter(sele2+' and chain F','chain="U"')
	cmd.alter(sele2+' and chain G','chain="T"')
	cmd.alter(sele2+' and chain H','chain="S"')
	cmd.alter(sele2+' and chain I','chain="R"')
	cmd.alter(sele2+' and chain J','chain="Q"')
	# selection strings like "chain A and resi 3+4+5+..." per helix window
	chunks1 = ["chain A and resi "+str(h)[1:-1].replace(', ','+') for h in get_fixed_size_contigs("chain A and %s and ss H"%sele1)]
	chunks2 = ["chain Z and resi "+str(h)[1:-1].replace(', ','+') for h in get_fixed_size_contigs("chain Z and %s and ss H"%sele2)]
	for i,hsel1 in enumerate(chunks1):
		name1 = pref+"_"+tmpname()
		algn1 = name1+" and "+hsel1
		cmd.create(name1,sele1)
		for j,hsel2 in enumerate(chunks2):
			name2 = pref+"_"+tmpname()
			algn2 = name2+" and "+hsel2
			cmd.create(name2,sele2)
			print algn2,algn1
			print name1+" and chain A and "+hsel1
			print name2+" and chain Z and "+hsel2
			# now align them
			cmd.align( name2+" and chain Z and "+hsel2, name1+" and chain A and "+hsel1 )
			name3 = pref+"_%03i_%03i"%(i,j)
			cmd.create(name3,name1+" or "+name2)
			util.cbc(name3+" and elem C")
			cmd.delete(name2)
		cmd.delete(name1)
def colorI53(sel="visible"):
	"""Color the two components of an I53 assembly by chain group, acting
	on carbon atoms only so heteroatom coloring is preserved."""
	pairs = [
		('cyan',    'A+C+E+G+I'),
		('green',   'B+L+N'),
		('magenta', '2+M+Q+U+Y'),
		('yellow',  '3+D+b'),
	]
	for color, chainset in pairs:
		cmd.color(color, '('+sel+') and (elem C) and (chain '+chainset+')')
def alignsym(sel="all",arch="I32",ax1=Vec(0,0,1),ax2=Vec(0.356825,0.000002,0.934171)):
if arch == "T32":
tgt1 = Vec( 1.00000000000000, 1.00000000000000, 1.00000000000000 ).normalized()
tgt2 = Vec( 1.00000000000000, 0.00000000000000, 0.00000000000000 ).normalized()
if arch == "T33":
tgt1 = Vec( 1.00000000000000, 1.00000000000000, 1.00000000000000 ).normalized()
tgt2 = Vec( 1.00000000000000, 1.00000000000000,-1.00000000000000 ).normalized()
if arch == "O32":
tgt1 = Vec( 1.00000000000000, 1.00000000000000, 1.00000000000000 ).normalized()
tgt2 = Vec( 1.00000000000000, 1.00000000000000, 0.00000000000000 ).normalized()
if arch == "O42":
tgt1 = Vec( 1.00000000000000, 0.00000000000000, 0.00000000000000 ).normalized()
tgt2 = Vec( 1.00000000000000, 1.00000000000000, 0.00000000000000 ).normalized()
if arch == "O43":
tgt1 = Vec( 1.00000000000000, 0.00000000000000, 0.00000000000000 ).normalized()
tgt2 = Vec( 1.00000000000000, 1.00000000000000, 1.00000000000000 ).normalized()
if arch == "I32":
tgt1 = Vec( 0.93417235896272, 0.00000000000000, 0.35682208977309 ).normalized()
tgt2 = Vec( 1.00000000000000, 0.00000000000000, 0.00000000000000 ).normalized()
if arch == "I52":
tgt1 = Vec( 0.85065080835204, 0.52573111211914, 0.00000000000000 ).normalized()
tgt2 = Vec( 1.00000000000000, 0.00000000000000, 0.00000000000000 ).normalized()
if arch == "I53":
tgt1 = Vec( 0.85065080835204, 0.52573111211914, 0.00000000000000 ).normalized()
tgt2 = Vec( 0.93417235896272, 0.00000000000000, 0.35682208977309 ).normalized()
if abs(ax1.angle(ax2) - tgt1.angle(tgt2) ) > 0.001:
print "your axes aren't spaced correctly for",arch,"angle should be",tgt1.angle(tgt2)
return
x = alignvectors( ax1, ax2, tgt1, tgt2 )
xform(sel,x)
def xtal_frames(tgt=None,skip=tuple(),r=100):
	"""Draw CGO cylinders for the rotational symmetry axes relating `tgt`
	(default: first loaded object) to every other loaded object, clipped
	to a sphere of radius r. 2-fold axes are drawn thin/orange, higher
	folds thick/white. `skip` lists n-folds to ignore.
	"""
	axes = list()
	objs = cmd.get_object_list()
	if not tgt: tgt = objs[0]
	assert tgt in objs
	c = com(tgt)
	covered = list()
	for o in objs:
		if o == tgt: continue
		x = getrelframe(tgt,o)
		# seenit = False
		# for xc in covered:
		# if x.t.distance(xc.t) < 0.001:
		# seenit = True
		# if seenit: continue
		axis,ang = x.rotation_axis()
		if ang < 1.0: continue # hack, nfold <= 6
		# skip screw transforms: translation along the axis means not a
		# pure rotation
		mov = proj( axis, x.t).length()
		if abs(mov) > 0.001: continue
		print o
		nf = 2*math.pi/ang
		if nf % 1.0 > 0.001: continue
		nf = int(nf)
		if nf in skip: continue
		if nf > 6 or nf == 5 or nf == 1: continue
		# average the orbit of the COM under x to get a point on the axis
		# NOTE(review): this loop rebinds `c`; it only returns to the
		# starting COM if x is an exact order-nf rotation -- confirm the
		# accumulated drift is acceptable
		ctot = Vec(0,0,0)
		xtot = Xform()
		for i in range(nf):
			ctot += c
			covered.append(xtot)
			c = x * c
			xtot *= x
		ctot /= nf
		# beg = ctot - r*axis
		# end = ctot + r*axis
		beg = ray_sphere_intersection( axis,ctot,c,r)
		end = ray_sphere_intersection(-axis,ctot,c,r)
		if not beg or not end: continue
		if nf is not 2: showcyl(beg,end,0.3,col=(1.0,1.0,1.0))
		else: showcyl(beg,end,0.2,col=(1.0,0.5,0.2))
		print round(nf),ctot,o
def makeh(sele='vis',n=30):
	"""Extend the A->B chain transform of `sele` into an n-copy helix.

	The relative frame between chains A and B is applied repeatedly to a
	backbone-only (N/CA/C) copy of chain A; copies are merged into a
	single object named HELIX, colored by chain.
	"""
	cmd.delete('helix')
	v = cmd.get_view()
	x0 = getrelframe(sele+' and chain B',sele+' and chain A')
	x = Xform()
	cmd.create('tmp',sele+' and chain A and name n+ca+c')
	for i in range(n):
		cmd.create('Htmp%i'%i,'tmp')
		xform('Htmp%i'%i,x)
		cmd.alter('Htmp%i'%i,"chain='%s'"%ROSETTA_CHAINS[i])
		print ROSETTA_CHAINS[i]
		# accumulate the repeat transform
		x = x * x0
	cmd.create("HELIX",'Htmp*')
	cmd.delete("Htmp*")
	cmd.delete('tmp')
	cmd.hide('ev','HELIX')
	cmd.show('lines','helix')
	util.cbc('HELIX')
	cmd.set_view(v)
# expose as a pymol command
cmd.extend('makeh',makeh)
def color_by_2component(col1="green",col2="cyan"):
chains = r"""ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz!@#$&.<>?]{}|-_\~=%"""
chains1 = [chains[i] for i in range(0,len(chains),2)]
chains2 = [chains[i] for i in range(1,len(chains),2)]
for c in chains1:
print c+c+c+c+c
cmd.color(col1,"chain "+c)
for c in chains2:
print c+c+c+c+c
cmd.color(col2,"chain "+c)
def make_ab_components(dir):
	"""For every pdb(.gz) in `dir`, build the C6 assembly and save only its
	A+B chain pair into a sibling directory <dir>_AB.

	NOTE(review): makec6 is not defined in this file -- presumably provided
	by pymol_util's star import; confirm.
	"""
	if not os.path.exists(dir+"_AB"):
		os.mkdir(dir+"_AB")
	for fn in os.listdir(dir):
		if not fn.endswith(".pdb") and not fn.endswith(".pdb.gz"): continue
		cmd.delete("all")
		cmd.load(dir+"/"+fn,"a")
		makec6("a",name="c6")
		cmd.save(dir+"_AB/"+fn,"c6 and chain A+B")
def nulltest():
	"""Doctest smoke test: a trivial example verifying the doctest
	machinery itself runs.
	>>> print "foo"
	foo
	"""
	return None
def load_tests(loader, tests, ignore):
	"""unittest load_tests protocol hook: add this module's doctests to the
	suite and return it.

	Bug fix: doctest was only imported under the __main__ guard, so running
	this module through unittest discovery raised NameError; import it
	locally here instead.
	"""
	import doctest
	tests.addTests(doctest.DocTestSuite())
	return tests
# Script entry point: run the module's doctests and print the result.
if __name__ == '__main__':
	import doctest
	r = doctest.testmod()
	print r
| [
"natteruw@sill.dhcp.baker"
] | natteruw@sill.dhcp.baker |
2dca0bd1777f06f89dde2a3e9d3cc9bf911d1be5 | 58580154e99fce5f2baa1c2d23c8e587c08a730a | /mlsummary/clustering/_optics.py | 95d4b91bb2643463b029a5e02e4e7e9fae9fb972 | [
"BSD-3-Clause"
] | permissive | serafinialessio/mlsummary | 1376108c8a1c2b323045b5b9251bd0f5410c7705 | c7f6d047fbedf182571f595f7aa59a4652df4229 | refs/heads/main | 2023-01-13T22:25:26.540860 | 2020-11-24T17:01:45 | 2020-11-24T17:01:45 | 315,001,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,706 | py | import numpy as np
import pandas as pd
from mlsummary.clustering._clustering_functions import _clust_weight, _clustering_metrics, \
_clustering_evaluation, _store_X, _fm, _ari, _sil, _db, _ch, _clust_centers_X
from mlsummary.outliers._outliers_functions import _scatter_clusters_outliers
class opticsSummary:
    """Summary wrapper around a fitted sklearn OPTICS clustering object.

    Collects cluster counts/sizes, internal validation metrics, optional
    external validation scores (when true labels are supplied), and the
    main hyper-parameters of the fitted model, for printing and plotting.
    """

    def __init__(self, obj, X=None, labels_true=None, store_X=False, digits = 3):
        # fitted sklearn.cluster.OPTICS instance
        self.model = obj
        # number of distinct labels
        # NOTE(review): includes the noise label -1 when present -- confirm
        # this is the intended cluster count
        self.n_clusters = np.unique(obj.labels_).shape[0]
        self.variables = obj.n_features_in_
        # internal validation metrics (None when X is not given)
        self.SIL, self.DB, self.CH = _clustering_metrics(obj.labels_, X, digits)
        self.centers = _clust_centers_X(X, obj.labels_)
        self.labels = obj.labels_
        # NOTE(review): labels_names duplicates n_features_in_ (same as
        # `variables`) -- looks like a copy/paste slip; verify before use
        self.labels_names = obj.n_features_in_
        self.cluster_size, self.cluster_weights = _clust_weight(obj.labels_)
        # external validation (None when labels_true is not given)
        self.ARI, self.FM = _clustering_evaluation(obj.labels_, labels_true, digits)
        # fitted hyper-parameters copied out for reporting
        self.eps = obj.eps
        self.max_eps = obj.max_eps
        self.min_samples = obj.min_samples
        self.min_cluster_size = obj.min_cluster_size
        self.metric = obj.metric
        self.power_mink = obj.p
        self.leaf = obj.leaf_size
        self.algorithm_type = obj.algorithm
        self.cluster_method = obj.cluster_method
        self.xi = obj.xi
        # optionally retain the training data for later plotting
        self.X = _store_X(X, store_X)
        self.labels_true = labels_true

    def describe(self):
        """Print a human-readable report of the fit and its scores."""
        print('OPTICS algorithm')
        print('------------------')
        print('Number of clusters: {}'.format(self.n_clusters))
        print('Labels name: {}'.format(self.labels_names))
        print('eps: {}'.format(self.eps))
        print('Max eps: {}'.format(self.max_eps))
        print('Metric: {}'.format(self.metric))
        print('Min samples: {}'.format(self.min_samples))
        print('Min cluster size: {}'.format(self.min_cluster_size))
        print('Algorithm: {}'.format(self.algorithm_type))
        print('Leaf size: {}'.format(self.leaf))
        if self.ARI is not None:
            print('Adjusted Rand Index: {}'.format(self.ARI))
        if self.FM is not None:
            print('Fowlkes Mallows: {}'.format(self.FM))
        if self.SIL is not None:
            print('Silhouette: {}'.format(self.SIL))
        if self.DB is not None:
            print('Davies Bouldin: {}'.format(self.DB))
        if self.CH is not None:
            print('Calinski Harabasz: {}'.format(self.CH))
        # bug fix: cluster sizes were previously printed under the label
        # 'Clusters weights', duplicating the next line's label
        print('Clusters size: \n {}'.format(self.cluster_size.to_frame().transpose().to_string(index = False)))
        print('Clusters weights: \n {}'.format(self.cluster_weights.to_frame().transpose().to_string(index = False)))
        #print('Cluster centers: \n {}'.format(self.centers))
        #print('Available attributes: \n {}'.format(self.__dict__.keys()))

    def __str__(self):
        return 'OPTICS algorithm with {} clusters \n Available attributes: \n {}'.format(self.n_clusters, self.__dict__.keys())

    def __repr__(self):
        return 'OPTICS algorithm with {} clusters \n Available attributes: \n {}'.format(self.n_clusters, self.__dict__.keys())

    def plot(self, X = None, palette='Set2'):
        """Scatter-plot clusters and outliers; falls back to the stored X."""
        if X is None:
            X = self.X
        labels = self.labels
        _scatter_clusters_outliers(_store_X(X, True), labels, palette)

    def ari(self, labels, labels_true, digits = 3):
        """Adjusted Rand Index between two labelings."""
        return _ari(labels, labels_true, digits)

    def fm(self, labels, labels_true, digits = 3):
        """Fowlkes-Mallows score between two labelings."""
        return _fm(labels, labels_true, digits)

    def sil(self, X, labels, digits = 3):
        """Silhouette score of a labeling on data X."""
        return _sil(X, labels, digits)

    def db(self, X, labels, digits = 3):
        """Davies-Bouldin score of a labeling on data X."""
        return _db(X, labels, digits)

    def ch(self,X, labels, digits = 3):
        """Calinski-Harabasz score of a labeling on data X."""
        return _ch(X, labels, digits)
| [
"srf.alessio@gmail.com"
] | srf.alessio@gmail.com |
27963a655be8a9cba63d2ffd03c96a21734e4eb4 | 05820ee892f87fe47df107deb48ef456c952699f | /bashfuscator/modules/string_obfuscators/hex_hash.py | ccf220f9e1a5efcdd0ee19f0656f4774c59689c4 | [
"MIT",
"BSD-3-Clause"
] | permissive | maliciousgroup/Bashfuscator | 45bb5061b51baacd6c63b2ae6d7e4ef6ea514331 | a6279842acb43c9572c39283786ff4203efbb32f | refs/heads/master | 2021-01-04T23:30:59.929643 | 2020-02-28T15:38:58 | 2020-02-28T15:38:58 | 240,796,740 | 3 | 0 | MIT | 2020-02-15T22:28:03 | 2020-02-15T22:28:03 | null | UTF-8 | Python | false | false | 1,100 | py | import hashlib
from bashfuscator.core.mutators.string_obfuscator import StringObfuscator
class HexHash(StringObfuscator):
    """String obfuscator that encodes each payload character as two hex
    digits found inside the md5 digest of a random string; the generated
    Bash re-derives each character with printf/md5sum/cut at runtime.
    """

    def __init__(self):
        super().__init__(
            name="Hex Hash",
            description="Uses the output of md5 to encode strings",
            sizeRating=5,
            timeRating=5,
            binariesUsed=["cut", "md5sum"],
            author="Elijah-Barker"
        )

    def mutate(self, userCmd):
        """Return the obfuscated payload for userCmd.

        For each character: brute-force random strings until one's md5 hex
        digest contains the character's hex encoding, then emit a Bash line
        that hashes that string and cuts out the matching digest bytes.
        NOTE(review): a multi-byte UTF-8 character yields 4+ hex digits,
        making the brute-force search much slower -- inputs are presumably
        effectively ASCII; confirm.
        """
        for ch in userCmd:
            # hex encoding of the character (2 hex digits per byte)
            hexchar = str(bytes(ch, "utf-8").hex())
            randomhash = ""
            # retry fresh random strings until the digest happens to
            # contain the needed hex substring
            while not hexchar in randomhash:
                m = hashlib.md5()
                randomString = self.randGen.randGenStr()
                m.update(bytes(randomString, "utf-8"))
                randomhash = m.hexdigest()
            # find() gives a 0-based offset; `cut -b` below is 1-based
            index = randomhash.find(hexchar)
            self.mangler.addPayloadLine(f"""* *:printf:^ ^"\\x$(* *:printf:^ ^%s^ ^'{randomString}'* *|* *:md5sum:* *|* *:cut:^ ^-b^ ^&{str(index + 1)}&-&{str(index + 2)}&* *)"* *END0""")
            self.mangler.addJunk()
        return self.mangler.getFinalPayload()
| [
"jalefevre@liberty.edu"
] | jalefevre@liberty.edu |
29494c29a43d43c4bf909ceb1816cc265b8acc9c | cffabf7d1a1583fe6987feba508d4c2e8e5637df | /src/us-co/scrape-statcode-us-co | e39418dd5e1333e9ca5b22d0b76288301cd0bf0c | [
"CC0-1.0"
] | permissive | esbranson/openlaw | 62e4b486a84b0156a25728fd4464bd981ad77c2e | d3fd06b53f7bc5917fc91527ca6ad717e739e667 | refs/heads/master | 2022-12-11T08:37:19.713993 | 2022-04-29T17:56:09 | 2022-04-29T17:56:09 | 17,883,540 | 17 | 6 | null | null | null | null | UTF-8 | Python | false | false | 37,653 | #! /usr/bin/python3 -uW all
# -*- coding: utf-8 -*-
usage="""
scrape-statcode-us-co - convert the Colorado Revised Statutes into Akoma Ntoso
See <https://en.wikipedia.org/wiki/Colorado_Revised_Statutes>. Given titles as
RTF files, in order, this will output them as an Akoma Ntoso XML file.
Usage: scrape-statcode-us-co [options] file [file ...]
Arguments:
file input RTF file from the bulk download site
-o file output file ('-' for stdout) (default: stdout)
-n num number of parallel threads (default: 2)
-p num number of Open Office processes (default: 1)
-d enable debuging output (default: warnings only)
"""
import sys
import os
import getopt
import lxml.etree as etree
import uno
import unohelper
import shlex
import subprocess
import time
import logging
import mimetypes
import enum
import collections
import concurrent.futures
import threading
import queue
import tempfile
import types
##
# Entry function: Parse paramters, call main function.
#
def main():
	"""Entry point: parse command-line flags, configure logging (including
	two custom debug levels), then hand off to do_it(). Returns a process
	exit code (0 on success)."""
	fout = sys.stdout.buffer
	debug = logging.INFO
	threads = 2
	processes = 1
	# custom log levels below DEBUG, patched onto the logging module
	logging.SUPERDEBUG = logging.DEBUG-2 # XXX monkey fix
	logging.UBERDEBUG = logging.DEBUG-4
	# parse arguments
	try:
		opts, args = getopt.getopt(sys.argv[1:], 'o:n:p:dh')
	except getopt.GetoptError:
		logging.fatal('getopt error {}'.format(usage))
		return 1
	for opt, arg in opts:
		if opt in {'-d', '--debug'}:
			# each -d steps one level deeper: DEBUG -> SUPERDEBUG -> UBERDEBUG
			if debug is logging.INFO:
				debug = logging.DEBUG
			elif debug is logging.DEBUG:
				debug = logging.SUPERDEBUG
			elif debug is logging.SUPERDEBUG:
				debug = logging.UBERDEBUG
			else:
				logging.warning("main unknown debugging level")
				debug = logging.DEBUG
		elif opt in {'-q', '--quiet'}:
			debug = logging.WARNING
		elif opt in {'-o'}:
			fout = arg
		elif opt in {'-n'}:
			threads = int(arg)
		elif opt in {'-p'}:
			processes = int(arg)
		elif opt in {'-h', '--help'}:
			print(opt, usage)
			return 0
		else:
			logging.fatal('invalid flag {}{}'.format(opt, usage))
			return 1
	if len(args) < 1:
		logging.fatal('need input files {}'.format(usage))
		return 1
	fns = args
	# configure
	logging.basicConfig(format='{levelname} {process}/{thread}/{funcName} {message}', style='{', level=debug)
	logging.addLevelName(logging.SUPERDEBUG, 'SUPERDEBUG')
	logging.addLevelName(logging.UBERDEBUG, 'UBERDEBUG')
	# module-level helpers for the custom levels (Logger-method variants
	# left disabled)
	# logging.Logger.superdebug = lambda inst, msg, *args, **kwargs: inst.log(logging.SUPERDEBUG, msg, *args, **kwargs)
	logging.superdebug = lambda msg, *args, **kwargs: logging.log(logging.SUPERDEBUG, msg, *args, **kwargs)
	# logging.Logger.uberdebug = lambda inst, msg, *args, **kwargs: inst.log(logging.UBERDEBUG, msg, *args, **kwargs)
	logging.uberdebug = lambda msg, *args, **kwargs: logging.log(logging.UBERDEBUG, msg, *args, **kwargs)
	# do it
	ret = do_it(fns, fout, processes, threads)
	return ret
##
# Execute do_parse() against given filenames in parallel.
#
def do_it(fns, fout, nprocs, nthreads):
	"""Convert input files to one Akoma Ntoso document written to fout.

	Starts nprocs Open Office (soffice) processes, round-robins the input
	files over them via a thread pool of nthreads do_parse workers, and
	appends each file's XML to the <body>. Returns 0 on success, 1 on any
	failure. Cleanup (queue poisoning, executor shutdown, soffice
	termination) always runs in the finally block.
	"""
	ret = 0
	# start soffice processes
	procs = []
	for i in range(nprocs):
		#pipename, tmpd, p = OOFile.start_soffice()
		procs.append(OOFile.start_soffice())
	time.sleep(5)  # give soffice time to come up before connecting
	# xml body
	akn = etree.Element('akomaNtoso', nsmap={None: "http://docs.oasis-open.org/legaldocml/ns/akn/3.0/WD17"})
	act = etree.SubElement(akn, 'act')
	meta = etree.SubElement(act, 'meta')
	body = etree.SubElement(act, 'body')
	# submit tasks
	executor = concurrent.futures.ThreadPoolExecutor(max_workers=nthreads)
	futures = []
	for i,fn in enumerate(fns):
		pipename = procs[i % nprocs][0]
		# errq signals producer to fail
		errq = queue.Queue() # XXX beware IPC v. interthread
		future = executor.submit(do_parse, pipename, fn, errq)
		futures.append((future, errq))
	# complete tasks (in submission order, preserving title order)
	try:
		for xml in (future.result() for future,errq in futures):
			# TODO we should have a faster way to detect errors
			if xml is None:
				# breaking cleanly triggers else clause
				# XXX is this really shutting down executor?
				raise RuntimeError('do_parse failed: xml is None')
			body.append(xml)
	except BaseException as e:
		logging.critical('do_it exception: {} {}'.format(type(e), e))
		ret = 1
	else:
		tree = etree.ElementTree(akn)
		tree.write(fout)
	finally:
		# cleanup
		logging.info('do_it cleaning up')
		logging.debug('do_it poisoning queues and cancelling futures')
		for future,errq in reversed(futures):
			future.cancel()
			errq.put(False)
		logging.debug('do_it closing executor')
		executor.shutdown()
		logging.debug('do_it closing processes')
		for pipename, tmpd, p in procs:
			if p.poll() is None:
				logging.debug('do_it closing soffice')
				OOFile.terminate_soffice(OOFile.connect_soffice(pipename))
				p.wait()
			tmpd.cleanup()
	return ret
##
# Parse a file and return partial Akoma Ntoso XML.
#
def do_parse(pipename, fn, errq):
	"""Parse one input file into partial Akoma Ntoso XML, or None on error.

	Builds a 3-stage pipeline of threads connected by bounded queues:
	OOFile (reads via the soffice pipe) -> OOFileParser -> XMLBuilder,
	then blocks on the output queue for the final XML (False = poisoned).

	NOTE(review): if the generic BaseException branch fires before the
	thread objects are created, the join() calls below would raise
	NameError -- confirm this path cannot occur in practice.
	"""
	logging.info('do_parse parsing: {}'.format(fn))
	xml = None
	try:
		paraq = queue.Queue(100)
		xmlq = queue.Queue(50)
		outq = queue.Queue() # XXX should we bound these?
		# open files, build threads
		# TODO different types of files?
		mime = mimetypes.guess_type(fn)
		if mime[0] == 'application/rtf':
			filethread = threading.Thread(target=OOFile.run, args=(pipename, fn, errq, paraq))
		else:
			logging.critical('do_parse unknown filetype: {} {}'.format(fn, mime))
			return None
		parserthread = threading.Thread(target=OOFileParser.run, args=(paraq, xmlq, errq))
		builderthread = threading.Thread(target=XMLBuilder.run, args=(xmlq, outq, errq))
		# parse: start consumers before the producer
		builderthread.start()
		parserthread.start()
		filethread.start()
		xml = outq.get()
		if xml is False: # TODO implement better queue poisoning
			xml = None
	except OSError as e:
		logging.critical('do_parse opening files: {}'.format(e))
		return None
	except BaseException as e:
		logging.critical('do_parse exception: {} {}'.format(type(e), e))
	logging.info('do_parse done: {}'.format(fn))
	# wait for completion of threads
	# TODO is this necessary?
	filethread.join()
	parserthread.join()
	builderthread.join()
	# return
	return xml
##
# A state machine that parses a stream of semi-structured document lines
# into partial Akoma Ntoso XML. The parse() function will consume the input
# and output an XML object.
#
class XMLBuilder:
	def __init__(self):
		# `state' is an ordered dictionary with the top-most
		# element of each type, which represents our heirarchy
		# of elements; keys are ordered from broadest (title) to
		# narrowest (subsubparagraph), all initially empty
		self.state = collections.OrderedDict()
		self.state['title'] = None
		self.state['article'] = None
		self.state['part'] = None
		self.state['subpart'] = None
		self.state['section'] = None
		self.state['subsection'] = None
		self.state['paragraph'] = None
		self.state['subparagraph'] = None
		self.state['subsubparagraph'] = None
##
#
#
	@staticmethod
	def run(inq, outq, errq):
		"""Thread entry point: build an XMLBuilder and drain inq into outq.
		On any exception, poison both outq and errq with False so the
		pipeline shuts down."""
		try:
			builder = XMLBuilder()
			builder.parse(inq, outq, errq)
		except BaseException as e:
			logging.critical('XMLBuilder.run exception: {} {}'.format(type(e), e), exc_info=True)
			outq.put(False) # poison queue
			errq.put(False)
##
# Parse all messages in @inq and return an XML object.
#
	def parse(self, inq, outq, errq):
		"""Consume messages from inq until poisoned: None means clean end
		(emit the root element to outq), False means upstream failure
		(propagate the poison)."""
		assert inq is not None
		# process messages
		while True:
			msg = inq.get()
			if msg is None: # poison pill
				outq.put(self.get_state_top())
				break
			elif msg is False:
				outq.put(False) # poison queue
				errq.put(False)
				break
			logging.superdebug('XMLBuilder.parse: {}'.format(msg))
			self.event(msg)
self.event(msg)
##
# Process a signal.
#
	def event(self, signal):
		"""Dispatch one parsed message by its 'type' field; only
		'heirarchy' signals are currently defined."""
		typ = signal['type']
		if typ in {'heirarchy'}:
			self.event_heirarchy(signal)
		else:
			raise RuntimeError('XMLBuilder: unknown event: {}'.format(signal))
##
# All heirarchical elements are essentially the same, except that only
# sections and below have content and have different ID algorithms.
#
def event_heirarchy(self, signal):
typ, enum, head, text, status = signal['subtype'], signal['enum'], signal['heading'], signal['content'], signal['status']
# determine subtype
if typ is None:
typ = self.parse_heirarchy_type(enum)
# create element
el = etree.Element(typ)
# info
el.attrib['title'] = self.get_name(typ, enum)
el.attrib['id'] = self.get_id(typ, enum)
if status:
el.attrib['status'] = status
if enum:
nel = etree.SubElement(el, 'num')
nel.text = enum
if head:
hel = etree.SubElement(el, 'heading')
hel.text = head
if text:
tel = etree.SubElement(el, 'content')
pel = etree.SubElement(tel, 'p')
pel.text = text
# get parent (only title has no parent) and attach
parentel = self.get_state_parent(typ)
if parentel is not None:
parentel.append(el)
else:
logging.debug('event_section no parent: {}'.format(signal))
# update state
self.set_state(el, typ)
##
# Determine the type of element from its enumeration.
#
# Note that 'I' may be a subparagraph, or it may be a
# sub-subparagraph that comes after 'H' etc.
#
# -------------------------
# | s | type |
# -------------------------
# | 1 | subsection |
# | a | paragraph |
# | IV | subparagraph |
# | A | sub-subparagraph |
# -------------------------
#
def parse_heirarchy_type(self, s):
ret = 'subsection'
if s.isdecimal():
ret = 'subsection'
elif s.islower():
ret = 'paragraph'
elif 'I' not in s and 'V' not in s and 'X' not in s:
ret = 'subsubparagraph'
elif s == 'I' and self.state['subsubparagraph'] is not None and self.state['subsubparagraph'][0] == 'H':
ret = 'subsubparagraph'
elif s == 'V' and self.state['subsubparagraph'] is not None and self.state['subsubparagraph'][0] == 'U':
ret = 'subsubparagraph'
elif s == 'X' and self.state['subsubparagraph'] is not None and self.state['subsubparagraph'][0] == 'W':
ret = 'subdivision'
else:
logging.superdebug('heirarchy_type assume roman num: {}'.format(s))
ret = 'subparagraph'
return ret
##
#
#
def get_name(self, typ, enum):
assert typ is not None
name = typ[0].upper() + typ[1:]
if enum is not None: # XXX if no enum, is this required to be unique?
name += ' ' + enum
return name
##
# XXX requires non-None parent to have id attribute?
#
def get_id(self, typ, enum):
assert typ is not None and enum is not None
parentel = self.get_state_parent(typ)
if parentel is None:
# XXX only top-most element's parent will be None?
ident = '/' + typ + '-' + enum
elif typ in {'section'}:
ident = '/' + typ + '-' + enum
elif XMLBuilder.test_above_section(typ):
ident = parentel.attrib['id'] + '/' + typ + '-' + enum
elif XMLBuilder.test_below_section(typ):
ident = parentel.attrib['id'] + '/' + enum
else:
logging.critical('get_id unknown type: {}'.format(typ))
raise RuntimeError('get_id unknown type: {}'.format(typ))
return ident
##
# Test if type is below section type.
#
# TODO should probably make more reboust to changes in heirarchy tree
#
@staticmethod
def test_below_section(typ):
return typ in {'subsection', 'paragraph', 'subparagraph', 'subsubparagraph'}
##
# Test if type is below section type.
#
# TODO should probably make more reboust to changes in heirarchy tree
#
@staticmethod
def test_above_section(typ):
return typ in {'title', 'article', 'part', 'subpart'}
##
# Get the lowest non-None element above type, or None if its the highest.
#
def get_state_parent(self, typ):
# get a reversed list of keys above typ
keys = list(self.state.keys())
keys = reversed(keys[:keys.index(typ)])
# get bottom-most element above typ
for key in keys:
if self.state[key] is not None:
return self.state[key]
return None
##
# Get and return the top-most element.
#
def get_state_top(self):
for key in self.state.keys():
if self.state[key] is not None:
return self.state[key]
##
# Update (and normalize) state.
#
def set_state(self, el, typ):
# update state
self.state[typ] = el
# normalize state: clear all elements below type from state
keys = list(self.state.keys())
keys = keys[keys.index(typ)+1:]
for key in keys:
self.state[key] = None
##
# A state machine program that parses a stream of unstructured lines into
# a stream of structured elements.
#
# It's essentially a Mealy machine, whose output is a list of
# structured elements, which are returned on event(). The parse() function
# will drive the event loop and yield each such line.
#
# XXX need to track down those random heirarchical subheadings that
# pop up out of nowhere, and make sure they are not getting picked up
# as text
#
class OOFileParser:
    """Mealy-style state machine that turns unstructured line messages
    (dicts with 'line', 'adjust', 'lmargin', 'weight', 'style' keys, as
    produced by OOFile.parse) into structured 'heirarchy' messages (dicts
    with 'type', 'subtype', 'name', 'enum', 'heading', 'content', 'status'
    keys, as consumed by XMLBuilder.event_heirarchy).

    The current state selects the event_* handler; the handler selects a
    transition_* method, which updates self.state and returns the list of
    output messages for that input line.
    """
    ##
    #
    #
    class StateEnum(enum.IntEnum):
        # Parser states. NOTE(review): section_idle is declared but never
        # entered by any transition — confirm whether it is dead.
        init = 1 # TODO needed?
        idle = 3
        heirarchy = 4
        section = 5
        section_idle = 6
        section_note = 7
        section_note_one = 8
        section_note_two = 9
        text = 10
    ##
    #
    #
    def __init__(self):
        # current machine state
        self.state = self.StateEnum.init
        # tracks whether the previous non-idle line ended with ':' (used to
        # avoid mistaking centered continuation text for subheaders)
        self.last_line_ended_with_colon = False
        # partially-built heirarchy output message, flushed on state exit
        self.stash = None
    ##
    #
    #
    @staticmethod
    def run(inq, outq, errq):
        """Construct a parser and drive it over the queues; on failure
        poison the downstream queues so sibling threads terminate."""
        try:
            parser = OOFileParser()
            parser.parse(inq, outq, errq)
        except BaseException as e:
            logging.critical('OOFileParser.run exception: {} {}'.format(type(e), e), exc_info=True)
            outq.put(False) # poison queue
            errq.put(False)
    ##
    # Parse messages from @inq and output resulting messages in @outq.
    # None is the normal poison pill; False signals an upstream error.
    #
    def parse(self, inq, outq, errq):
        assert inq is not None and outq is not None
        while True:
            inmsg = inq.get()
            if inmsg is None: # poison pill
                outq.put(None) # poison queue
                break
            elif inmsg is False:
                outq.put(False)
                errq.put(False)
                break
            for outmsg in self.event(inmsg):
                outq.put(outmsg)
    ##
    # Consume an event and return a list of structured elements
    # in the form of {'type':, 'enum':, 'heading':, 'content':}.
    #
    # The event function is chosen by current state; the transition
    # function is (then) chosen by current state and the signal. As such,
    # its output is chosen by current state and the signal as well.
    #
    # NOTE(review): there is no branch for StateEnum.section_idle; if that
    # state were ever entered, `ret` would be unbound here. It is never set
    # by any transition, so this is currently unreachable.
    #
    def event(self, signal):
        # XXX strip line
        signal['line'] = signal['line'].strip()
        # XXX fixups
        signal['line'] = OOFileParser.fixup(signal['line'])
        #
        if self.state == self.StateEnum.init:
            ret = self.event_init(signal)
        elif self.state == self.StateEnum.idle:
            ret = self.event_idle(signal)
        elif self.state == self.StateEnum.heirarchy:
            ret = self.event_heirarchy(signal)
        elif self.state == self.StateEnum.section:
            ret = self.event_section(signal)
        elif self.state == self.StateEnum.section_note:
            ret = self.event_section_note(signal)
        elif self.state == self.StateEnum.section_note_one:
            ret = self.event_section_note_one(signal)
        elif self.state == self.StateEnum.section_note_two:
            ret = self.event_section_note_two(signal)
        elif self.state == self.StateEnum.text:
            ret = self.event_text(signal)
        # XXX keep track of centered text preceeded by lines ending with ':'
        if self.state != self.StateEnum.idle:
            self.last_line_ended_with_colon = signal['line'].endswith(':')
        #
        return ret
    def event_init(self, signal):
        """Initial state: discard the first line and go idle."""
        logging.uberdebug('init')
        # XXX skip first line
        return self.transition_idle(signal)
    def event_idle(self, signal):
        """Idle state: classify the line and dispatch to the matching
        transition (section, subsection, heirarchy, subheader or text)."""
        logging.uberdebug('idle')
        line, adjust, lmargin, weight = signal['line'], signal['adjust'], signal['lmargin'], signal['weight']
        if line == '':
            return self.transition_self(signal)
        elif lmargin > 0:
            return self.transition_text(signal)
        elif OOFileParser.test_sec(line, adjust):
            return self.transition_section(signal)
        elif OOFileParser.test_subsec(line, adjust):
            return self.transition_subsection(signal)
        elif OOFileParser.test_heirarchy(line):
            return self.transition_heirarchy(signal)
        elif OOFileParser.test_anonymous_heirarchy(line, adjust, weight):
            # XXX skip anonymous heirarchies
            return self.transition_self(signal)
        # XXX should we only be able to enter subheader state
        # from heirarchy state to prevent mistaking text for subheaders?
        # elif adjust == 'center' and not line.startswith('WARNING'):
        elif adjust == 'center' and self.last_line_ended_with_colon is False:
            return self.transition_heirarchy_subheader(signal)
        else:
            # assume text attached to previous section/subsection
            return self.transition_text(signal)
    ##
    # NOTE if we transition away, flush stashed output signal.
    #
    def event_heirarchy(self, signal):
        """Heirarchy state: wait for a possible (multi-line) subheader;
        flush the stashed heirarchy message when leaving the state."""
        logging.uberdebug('heirarchy')
        line, adjust, weight = signal['line'], signal['adjust'], signal['weight']
        if line == '':
            # don't transition because we may get subheader
            return self.transition_self(signal)
        elif OOFileParser.test_sec(line, adjust):
            return self.transition_heirarchy_flush(self.transition_section, signal)
        elif OOFileParser.test_heirarchy(line):
            return self.transition_heirarchy_flush(self.transition_heirarchy, signal)
        elif OOFileParser.test_anonymous_heirarchy(line, adjust, weight):
            # XXX skip anonymous heirarchies
            return self.transition_self(signal)
        elif adjust == 'center': # XXX should we test on last_line_ended_with_colon?
            return self.transition_heirarchy_subheader(signal)
        else: # XXX is there something better to do here? will a subheader ever not be centered?
            return self.transition_heirarchy_flush(self.transition_text, signal)
    def event_section(self, signal):
        """Section state: handle trailing text, new (sub)sections and
        editor's notes following a section line."""
        logging.uberdebug('section')
        line, adjust = signal['line'], signal['adjust']
        if line == '':
            return self.transition_idle(signal)
        # XXX put fixups into fixups()?
        elif line.endswith('\xa0weeks') or line == 'the use of an artificial limb':
            # fixup 8-42-107
            return self.transition_text(signal)
        elif line.startswith('$'):
            # fixup 9-4-109
            return self.transition_text(signal)
        elif OOFileParser.test_sec(line, adjust):
            return self.transition_section(signal)
        elif OOFileParser.test_subsec(line, adjust):
            return self.transition_subsection(signal)
        elif "Editor's note" in line:
            return self.transition_section_note(signal)
        else: # XXX is there something better to do here?
            return self.transition_text(signal)
    def event_section_note(self, signal):
        """First line after an editor's note marker: must be blank."""
        logging.uberdebug('section_note')
        line = signal['line']
        if line == '':
            return self.transition_section_note_one(signal)
        else:
            raise RuntimeError('event_section_note ERROR: {}'.format(repr(line)))
    def event_section_note_one(self, signal):
        """Second note line: blank continues the note, otherwise a new
        (sub)section must follow."""
        logging.uberdebug('section_note_one')
        line, adjust = signal['line'], signal['adjust']
        if line == '':
            return self.transition_section_note_two(signal)
        elif OOFileParser.test_sec(line, adjust):
            return self.transition_section(signal)
        elif OOFileParser.test_subsec(line, adjust):
            return self.transition_subsection(signal)
        else:
            raise RuntimeError('event_section_note_one ERROR: {}'.format(repr(line)))
    def event_section_note_two(self, signal):
        """Third note line: blank returns to idle; otherwise dispatch,
        assuming heirarchy as a last resort."""
        logging.uberdebug('section_note_two')
        line, adjust = signal['line'], signal['adjust']
        if line == '':
            return self.transition_idle(signal)
        elif OOFileParser.test_subsec(line, adjust):
            return self.transition_subsection(signal)
        elif OOFileParser.test_sec(line, adjust):
            return self.transition_section(signal)
        else:
            logging.warning('section_note_two assume heirarchy: {}'.format(signal))
            return self.transition_heirarchy(signal)
    def event_text(self, signal):
        """Text state: accumulate free text until a new structural line
        (section/subsection/heirarchy) appears."""
        logging.uberdebug('text')
        line, adjust, lmargin = signal['line'], signal['adjust'], signal['lmargin']
        if line == '':
            return self.transition_self(signal)
        elif lmargin > 0:
            return self.transition_text(signal)
        elif line.endswith('\xa0weeks') or line == 'the use of an artificial limb':
            # XXX fixup 8-42-107
            return self.transition_text(signal)
        elif line.startswith('$'):
            # fixup various
            return self.transition_text(signal)
        elif OOFileParser.test_sec(line, adjust):
            return self.transition_section(signal)
        elif OOFileParser.test_subsec(line, adjust):
            return self.transition_subsection(signal)
        elif OOFileParser.test_heirarchy(line):
            return self.transition_heirarchy(signal)
        else:
            # assume text attached to previous section/subsection
            return self.transition_text(signal)
    def transition_self(self, signal):
        """Stay in the current state; emit nothing."""
        logging.uberdebug('self: {}'.format(signal))
        return []
    def transition_idle(self, signal):
        """Enter the idle state; emit nothing."""
        logging.uberdebug('idle: {}'.format(signal))
        self.state = self.StateEnum.idle
        return []
    ##
    # Stash the output signal away and flush it when we leave the
    # heirarchy state.
    #
    def transition_heirarchy(self, signal):
        logging.debug('heirarchy: {}'.format(signal))
        line = signal['line']
        typ,enum = line.split(' ',1)
        typ = typ.lower()
        output = {'type': 'heirarchy', 'subtype': typ, 'name': line, 'enum': enum, 'heading': None, 'content': None, 'status': None}
        self.stash = output
        self.state = self.StateEnum.heirarchy
        return []
    ##
    # Append input signal information to stashed output signal.
    #
    # XXX Always guard against anonymous heirarchies to avoid
    # crashes on lack of incomplete heirarchy in stash.
    #
    def transition_heirarchy_subheader(self, signal):
        logging.debug('subheader: {}'.format(signal))
        if self.stash is not None:
            line, weight = signal['line'], signal['weight']
            head, status = OOFileParser.parse_subheader(line)
            if head is not None:
                if self.stash['heading'] is not None:
                    self.stash['heading'] += ' ' + head
                else:
                    self.stash['heading'] = head
            if status is not None:
                if self.stash['status'] is not None:
                    self.stash['status'] += ' ' + status
                else:
                    self.stash['status'] = status
        else:
            logging.warning('subheader stash is None')
        self.state = self.StateEnum.heirarchy
        return []
    ##
    # Flush stashed output signal: run transition @f, then prepend the
    # stashed heirarchy message to its output.
    #
    def transition_heirarchy_flush(self, f, signal):
        assert isinstance(f, types.MethodType)
        sig = self.stash
        self.stash = None
        ret = f(signal)
        ret.insert(0, sig)
        return ret
    def transition_section(self, signal):
        """Tokenize a section line (with any trailing subsections) into a
        list of heirarchy messages and enter the section state."""
        logging.uberdebug('section: {}'.format(signal))
        sec = OOFileParser.tokenize_section(signal['line']) # return enum, head, status, text, subsecl
        logging.uberdebug('section sec: {}'.format(sec))
        ret = [{'type': 'heirarchy', 'subtype': 'section', 'name': None, 'enum': sec[0], 'heading': sec[1], 'content': sec[3], 'status': sec[2]}]
        subsec = sec[4]
        while subsec is not None: # return enum, status, text, subsecl
            logging.uberdebug('section subsec: {}'.format(subsec))
            ret.append({'type': 'heirarchy', 'subtype': None, 'name': None, 'enum': subsec[0], 'heading': None, 'content': subsec[2], 'status': subsec[1]})
            subsec = subsec[3]
        self.state = self.StateEnum.section
        return ret
    def transition_subsection(self, signal):
        """Tokenize a subsection line (possibly nested) into heirarchy
        messages and enter the section state."""
        logging.uberdebug('subsection: {}'.format(signal))
        subsec = OOFileParser.tokenize_subsection(signal['line'])
        ret = []
        while subsec is not None: # return enum, status, text, subsecl
            logging.uberdebug('subsection subsec: {}'.format(subsec))
            ret.append({'type': 'heirarchy', 'subtype': None, 'name': None, 'enum': subsec[0], 'heading': None, 'content': subsec[2], 'status': subsec[1]})
            subsec = subsec[3]
        self.state = self.StateEnum.section
        return ret
    def transition_section_note(self, signal):
        """Enter the section_note state; emit nothing."""
        logging.uberdebug('section_note: {}'.format(signal))
        self.state = self.StateEnum.section_note
        return []
    def transition_section_note_one(self, signal):
        """Enter the section_note_one state; emit nothing."""
        logging.uberdebug('section_note_one: {}'.format(signal))
        self.state = self.StateEnum.section_note_one
        return []
    def transition_section_note_two(self, signal):
        """Enter the section_note_two state; emit nothing."""
        logging.uberdebug('section_note_two: {}'.format(signal))
        self.state = self.StateEnum.section_note_two
        return []
    def transition_text(self, signal):
        """Enter the text state; emit nothing (text is not forwarded)."""
        logging.debug('text: {}'.format(signal))
        self.state = self.StateEnum.text
        return []
    #
    # XXX these methods are complete hacks
    #
    @staticmethod
    def test_sec(line, adjust):
        """Heuristic: does this non-centered line start a section
        (e.g. '8-1-101.<nbsp>...')?"""
        return len(line) and '.\xa0' in line and '-' in line and line.split('-',1)[0][-1].isdigit() and adjust != 'center' and line[0].isdigit()
    @staticmethod
    def test_subsec(line, adjust):
        """Heuristic: does this non-centered line start a subsection
        (e.g. '(1)<nbsp>...')?"""
        return len(line) and '\xa0' in line and line[0] == '(' and ')' in line and adjust != 'center' and ' ' not in line.split(')',1)[0] and '_' not in line.split(')',1)[0] and '\xa0' not in line.split(')',1)[0]
    @staticmethod
    def test_heirarchy(line):
        # XXX should there be a space after each?
        # XXX is it always a digit after the word?
        # XXX Title 24, Article 60, Part 22/25 have articles within!?
        # XXX Section 14-5-609 starts Part C, so alphanumeric?
        return len(line) and (line.startswith('TITLE ') and line.split('TITLE ',1)[1][0].isdigit() or line.startswith('PART ') and line.split('PART ',1)[1][0].isalnum() or line.startswith('SUBPART ') and line.split('SUBPART ',1)[1][0].isalnum() or line.startswith('ARTICLE ') and line.split('ARTICLE ',1)[1][0].isdigit()) and not line.endswith('.')
    ##
    # Test for anonymous (untyped, only with heading) heirarchies.
    #
    # XXX need more robust logic for checking 'A.' types
    #
    @staticmethod
    def test_anonymous_heirarchy(line, adjust, weight):
        return adjust == 'center' and (weight == 'bold' or line.startswith('A.') or line.startswith('B.') or line.startswith('C.') or line.startswith('D.'))
    ##
    # Split a subheader line into (heading, status); a trailing
    # '(Reserved)' marks the heirarchy as incomplete.
    #
    @staticmethod
    def parse_subheader(s):
        status = None
        if s.endswith('(Reserved)'):
            s,_ = s.rsplit('(Reserved)',1)
            status = 'incomplete'
        return s or None, status
    ##
    # Return a parsed section and with any subsection(s).
    #
    # XXX sometimes the header element has the first enum, e.g., 'header (a)'
    #
    @staticmethod
    def tokenize_section(line):
        l = line.split('\xa0')
        logging.uberdebug('tokenize_section: {}'.format(l))
        l = [s.strip() for s in l]
        enum = head = status = subsecl = None
        textl = [] # TODO should we join? or should they be separate <p>?
        for n,s in enumerate(l):
            if s == '':
                pass
            elif enum is None:
                enum = OOFileParser.parse_sec_enum(s)
                logging.uberdebug('tokenize_section enum: {}'.format(enum))
            elif head is None:
                head,status = OOFileParser.parse_sec_head(s)
                logging.uberdebug('tokenize_section head: {} {}'.format(head, status))
            elif OOFileParser.test_enum(s):
                subsecl = OOFileParser.tokenize_subsection_r(l[n:])
                break # input has been consumed
            else:
                textl.append(s)
        text = str.join(' ', textl) or None
        return enum, head, status, text, subsecl
    ##
    # Return a parsed section string.
    #
    @staticmethod
    def parse_sec_enum(s):
        return s.rstrip('.')
    ##
    # Return a parsed heading string.
    #
    @staticmethod
    def parse_sec_head(s):
        status = None
        if s.endswith('(Repealed)'):
            s,_ = s.rsplit('(Repealed)',1)
            status = 'removed'
        return s.strip().rstrip('.'), status
    ##
    # Return a parsed subsection.
    #
    @staticmethod
    def tokenize_subsection(line):
        l = line.split('\xa0')
        logging.uberdebug('tokenize_subsection: {}'.format(l))
        l = [s.strip() for s in l]
        ret = OOFileParser.tokenize_subsection_r(l)
        return ret
    ##
    # Parse subsection(s) from a simply tokenized string.
    #
    # XXX whether its a sub-subsection or another type should be
    # decided at higher layers
    #
    @staticmethod
    def tokenize_subsection_r(l):
        logging.uberdebug('tokenize_subsection_r: {}'.format(l))
        enum = status = subsecl = None
        textl = []
        for n,s in enumerate(l):
            if s == '':
                pass
            # this subsection's enum
            elif enum is None:
                enum,text = OOFileParser.parse_subsec_enum(s)
                logging.uberdebug('tokenize_subsection_r enum: {} {}'.format(repr(enum), repr(text)))
                if text is not None:
                    textl.append(text)
            # repealed
            # XXX this should be a note or history
            elif OOFileParser.test_subsec_repealed(s):
                textl.append(OOFileParser.parse_subsec_repealed(s))
                status = 'removed'
            # enum indicating a new sub-subsection
            elif OOFileParser.test_enum(s):
                logging.uberdebug('tokenize_subsection_r recurs: {}'.format(l[n:]))
                subsecl = OOFileParser.tokenize_subsection_r(l[n:])
                break # input has been consumed
            # text for this subsection
            else:
                logging.uberdebug('tokenize_subsection_r text: {}'.format(s))
                textl.append(s)
        text = str.join(' ', textl)
        if text == 'Repealed.' or text == 'Repealed':
            text = None
            status = 'removed'
        text = text or None
        return enum, status, text, subsecl
    ##
    # Test if string represents an enumeration.
    #
    @staticmethod
    def test_enum(s):
        # if s[0] == '(' or '(' in s and s[-1] == ')':
        if len(s) >= 3 and (s[0] == '(' or '(' in s and s[-1] == ')') and s[-2] != '.':
            return True
        # elif s[0] == '(' and s[-1] == ')' and (') to (' in s or ') and (' in s):
        #     return True
        return False
    ##
    # Parse and return an enumeration and text before enumeration (if any).
    #
    # XXX how to specify range values for num in Akoma Ntoso?
    # XXX how will choice of enum affect id attribute? whitespace?
    #
    @staticmethod
    def parse_subsec_enum(s):
        if ') to (' in s:
            one,two = s.strip('(').split(') to (')
            two,text = two.split(')',1)
            enum = one + ' to ' + two
        elif ') and (' in s:
            one,two = s.strip('(').split(') and (')
            two,text = two.split(')',1)
            enum = one + ' and ' + two
        else:
            enum,text = s.split(')',1)
        if len(text) and text[0] == ' ':
            # fixup '(num) text'
            text = text.strip()
        enum = enum.strip('(').strip(')')
        return enum, text or None
    ##
    # Test for an inline '(Deleted ...)' repeal marker.
    #
    @staticmethod
    def test_subsec_repealed(s):
        return s.startswith('(Deleted')
    ##
    # XXX this should be a note or history
    #
    @staticmethod
    def parse_subsec_repealed(s):
        return s.strip('(').strip(')')
    ##
    # Perform specific fixups on string and return fixed-up string.
    # These repair known typos/odd non-breaking-space usage in the input.
    #
    @staticmethod
    def fixup(line):
        orig = line
        # sections
        line = line.replace('this part\xa05', 'this part 5')
        line = line.replace('property\xa0-\xa0nonprofit', 'property - nonprofit')
        line = line.replace('defend\xa0-\xa0standing', 'defend - standing')
        line = line.replace('complaint\xa0-\xa0service', 'complaint - service')
        line = line.replace('article\xa064', 'article 64')
        line = line.replace('8-17-105.Compliance standard.', '8-17-105.\xa0\xa0Compliance standard.')
        # subsections
        if line.startswith('(4) '):
            line = '(4)\xa0\xa0' + line[5:]
        elif line.startswith('(II) '):
            line = '(II)\xa0\xa0' + line[5:]
        line = line.replace('this part\xa05', 'this part 5')
        line = line.replace('BTU/H\xa0FT', 'BTU/H FT')
        line = line.replace('by section\xa07-62-1104', 'by section 7-62-1104')
        line = line.replace('of subsections\xa0(1) and', 'of subsections (1) and')
        line = line.replace('title\xa0shall', 'title shall')
        line = line.replace('article\xa060', 'article 60')
        line = line.replace('section\xa05-12-102', 'section 5-12-102')
        line = line.replace('section\xa07-64-1205', 'section 7-64-1205')
        line = line.replace('section\xa07-64-601', 'section 7-64-601')
        # can't remember
        line = line.replace('article\xa0V', 'article V')
        line = line.replace('§§\xa01', '§§ 1')
        line = line.replace(' §\xa038-35-106.5', ' § 38-35-106.5')
        # ret
        if orig is not line:
            logging.debug('fixup replace: {} {}'.format(repr(orig), repr(line)))
        return line
##
#
#
class OOFile():
    """Static helpers that drive a headless soffice (OpenOffice/LibreOffice)
    instance over a UNO named pipe and stream a document's paragraphs as
    message dicts for OOFileParser."""
    ##
    # Wrapper around parse() that poisons @outq on any unexpected failure.
    #
    @staticmethod
    def run(pipename, fn, errq, outq):
        try:
            OOFile.parse(pipename, fn, errq, outq)
        except BaseException as e:
            logging.critical('OOFile.run exception: {} {}'.format(type(e), e))
            outq.put(False) # poison queue
    ##
    # Open file using desktop and parse and enqueue messages representing paragraphs.
    #
    # Each message is {'line':, 'adjust':, 'lmargin':, 'weight':, 'style':}.
    # None on @outq signals normal completion; False signals an error.
    # Anything appearing on @errq aborts the parse.
    #
    @staticmethod
    def parse(pipename, fn, errq, outq):
        assert fn is not None and outq is not None
        doc = None
        # get desktop
        desktop = OOFile.connect_soffice(pipename)
        if not desktop:
            logging.critical('OOFile.parse no desktop')
            outq.put(False)
            return
        # open file
        url = unohelper.systemPathToFileUrl(os.path.abspath(fn))
        try:
            doc = desktop.loadComponentFromURL(url ,'_blank', 0, (uno.createUnoStruct('com.sun.star.beans.PropertyValue', 'ReadOnly', 0, True, 0),))
        except uno.getClass('com.sun.star.lang.IllegalArgumentException') as e:
            # BUGFIX: this previously referenced an undefined name `filename`
            # (NameError); the parameter is `fn`.
            logging.critical('OOFile.parse file not found: {}'.format(fn))
            outq.put(False)
            return
        except uno.getClass('com.sun.star.lang.DisposedException') as e:
            logging.critical('OOFile.parse desktop bridge died: {}'.format(e))
            outq.put(False)
            return
        except uno.getClass('com.sun.star.uno.RuntimeException') as e:
            logging.critical('OOFile.parse desktop exception: {}'.format(e))
            outq.put(False)
            return
        if doc is None:
            logging.critical('OOFile.parse doc is None')
            outq.put(False)
            return
        # get the com.sun.star.text.Text service and get an XEnumeration of com.sun.star.text.Paragraph objects from the XEnumerationAccess
        for para in OOFile.XEnumeration(doc.getText()):
            lmargin = None
            adjust = None
            weightn = -1 # sentinel: no text portion carried a weight
            style = None
            # skip non-paragraphs
            if not para.supportsService('com.sun.star.text.Paragraph'):
                continue
            # get left margin
            if para.supportsService('com.sun.star.style.ParagraphProperties') and hasattr(para, 'ParaLeftMargin'):
                lmargin = para.ParaLeftMargin
            # get adjustment
            if para.supportsService('com.sun.star.style.ParagraphProperties') and hasattr(para, 'ParaAdjust'):
                adjustn = para.ParaAdjust
            ss = []
            # get an XEnumeration of com.sun.star.text.TextPortion objects
            for portion in OOFile.XEnumeration(para):
                # skip non-text portions
                if portion.TextPortionType != 'Text':
                    continue
                # get portion string
                ss.append(portion.getString())
                # get the last portion's weight
                if portion.supportsService('com.sun.star.style.CharacterProperties') and hasattr(portion, 'CharWeight'):
                    weightn = portion.CharWeight
                # get the last portion's style
                if portion.supportsService('com.sun.star.style.ParagraphProperties') and hasattr(portion, 'ParaStyleName'):
                    style = portion.ParaStyleName # XXX need to strip?
            # interpret data
            s = str.join('', ss)
            if adjustn == 3: # com.sun.star.style.ParagraphAdjust
                adjust = 'center'
            elif adjustn == 0:
                adjust = 'left'
            elif adjustn == 2:
                adjust = 'block'
            else:
                logging.warning('OOFile.parse unknown adjust: {}'.format(adjustn))
                adjust = None
            if round(weightn) == 100: # com.sun.star.awt.FontWeight
                weight = 'normal'
            elif round(weightn) == 150:
                weight = 'bold'
            elif weightn == -1:
                weight = None
            else:
                logging.warning('OOFile.parse unknown weight: {}'.format(weightn))
                weight = None
            message = {'line': s, 'adjust': adjust, 'lmargin': lmargin, 'weight': weight, 'style': style}
            # check for error message; any message on errq means abort
            # (XXX qsize() is only approximate, hence the Empty guard)
            if errq.qsize() > 0:
                try:
                    inmsg = errq.get(block=False)
                    OOFile.close(doc)
                    outq.put(False) # poison output queue and exit
                    return
                except queue.Empty as e:
                    logging.warning('OOFile.parse errq size weirdness')
            # enqueue message
            outq.put(message)
        # close file
        OOFile.close(doc)
        # poison queue
        outq.put(None)
    ##
    # Close a loaded document, swallowing (but logging) UNO errors.
    #
    @staticmethod
    def close(doc):
        # See <https://wiki.openoffice.org/wiki/Documentation/DevGuide/OfficeDev/Closing_Documents>.
        logging.debug('OOFile closing: {}'.format(doc))
        try:
            if doc is not None:
                # XXX we should check for the com.sun.star.util.XCloseable interface first
                doc.close(True)
        except uno.getClass('com.sun.star.lang.DisposedException') as e:
            logging.critical('OOFile.parse uno.DisposedException: {} {}'.format(doc, e))
        except uno.getClass('com.sun.star.uno.RuntimeException') as e:
            logging.critical('OOFile.parse uno.RuntimeException: {} {}'.format(doc, e))
    ##
    # Get an XEnumeration of objects from a given object supporting the
    # XEnumerationAccess interface.
    #
    @staticmethod
    def XEnumeration(obj):
        xenum = obj.createEnumeration()
        while xenum.hasMoreElements():
            yield xenum.nextElement()
    ##
    # Start a headless soffice listening on @pipename (random if None) with
    # a throwaway HOME, and return (pipename, tempdir, Popen).
    #
    # TODO make POSIX/Windows agnostic, e.g., USERPROFILE instead of HOME.
    #
    @staticmethod
    def start_soffice(pipename=None):
        if pipename is None:
            pipename = 'officepipe-'+next(tempfile._RandomNameSequence())
        tmpd = tempfile.TemporaryDirectory()
        cmd = 'soffice --accept="pipe,name='+pipename+';urp;StarOffice.ServiceManager" --norestore --nologo --headless --nolockcheck'
        p = subprocess.Popen(shlex.split(cmd), env={"HOME": tmpd.name}, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
        return pipename, tmpd, p
    ##
    # Connect to a running soffice instance and return a XDesktop object
    # (or None after @tries attempts, @sleep seconds apart).
    #
    @staticmethod
    def connect_soffice(pipename, tries=5, sleep=5):
        context = None
        desktop = None
        local = uno.getComponentContext()
        resolver = local.ServiceManager.createInstanceWithContext('com.sun.star.bridge.UnoUrlResolver', local)
        resolvurl = 'uno:pipe,name='+pipename+';urp;StarOffice.ComponentContext'
        for i in range(tries):
            try:
                context = resolver.resolve(resolvurl)
                if context is not None:
                    desktop = context.ServiceManager.createInstanceWithContext('com.sun.star.frame.Desktop', context)
            except uno.getClass('com.sun.star.lang.DisposedException') as e:
                logging.critical('OOFile.connect_soffice bridge died: {}'.format(e))
                break
            except uno.getClass('com.sun.star.connection.NoConnectException') as e:
                logging.debug('OOFile.connect_soffice failed to connect {} / {}'.format(i+1, tries))
                time.sleep(sleep)
        if context is None or desktop is None:
            logging.warning('OOFile.connect_soffice failed to connect')
        return desktop
    ##
    # Ask the desktop to terminate (kills soffice); return True on success.
    #
    @staticmethod
    def terminate_soffice(desktop):
        if desktop is None:
            logging.debug('OOFile.terminate_soffice desktop None')
            return False
        try:
            desktop.terminate() # kills soffice
        except uno.getClass('com.sun.star.lang.DisposedException') as e: # XXX needed?
            logging.critical('OOFile.terminate_soffice uno.DisposedException: {} {}'.format(desktop, e))
            return False
        except uno.getClass('com.sun.star.uno.RuntimeException') as e:
            logging.critical('OOFile.terminate_soffice uno.RuntimeException: {} {}'.format(desktop, e))
            return False
        return True
# Script entry point: run main() and propagate its return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| [
"esbranson@gmail.com"
] | esbranson@gmail.com | |
26f81f4eba18c5a190446ffa466434e3ef381ba3 | 41047972c9becb8ae49249b01947f4c9b97f30ed | /VPN/NTU_gcnn_Loader.py | 0c2993db0a6a15eb2956f1171ede1bb45b76641a | [] | no_license | tranminhduc4796/vpn_action_recognition | c25a5ce397268f7088ec4aade6cdb2d951829d93 | 6c2e78f2bf0b312e047ba73eb585e660e96f60d3 | refs/heads/main | 2023-02-22T19:57:59.188150 | 2021-01-20T08:34:52 | 2021-01-20T08:34:52 | 327,543,723 | 3 | 3 | null | 2021-01-19T15:29:36 | 2021-01-07T08:03:19 | Python | UTF-8 | Python | false | false | 5,367 | py | import os
import numpy as np
import keras
import glob
from random import randint, shuffle
from keras.utils import to_categorical
import cv2
from sklearn.preprocessing import LabelEncoder
# Select the TensorFlow backend for Keras and seed NumPy's RNG for reproducibility.
# NOTE(review): this is set *after* `import keras` above, so it likely has no
# effect on backend selection — confirm and move before the import if needed.
os.environ['KERAS_BACKEND'] = 'tensorflow'
seed = 8
np.random.seed(seed)
class DataGenerator(keras.utils.Sequence):
def __init__(self, paths, graph_conv_filters, timesteps, mode, num_classes, stack_size, batch_size=32,
num_features=3, num_nodes=25):
self.batch_size = batch_size
self.path_skeleton = paths['skeleton']
self.path_cnn = paths['cnn']
self.files = [i.strip() for i in open(paths['split_path'] + mode + '.txt').readlines()]
self.graph_conv_filters = graph_conv_filters
self.num_classes = num_classes
self.stack_size = stack_size
self.stride = 2
self.step = timesteps
self.dim = num_features * num_nodes * 2 # for two skeletons in a single frame
self.mode = mode
self.num_features = num_features
self.num_nodes = num_nodes
self.label_enc = self.get_label_enc()
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(len(self.files) / self.batch_size)
def __getitem__(self, idx):
batch = self.files[idx * self.batch_size: (idx + 1) * self.batch_size]
graph_conv = self.graph_conv_filters[0:self.batch_size]
x_data_cnn = self._get_data_cnn(batch)
X = self._get_data_skeleton(batch)
y_data = self.label_enc.transform(np.array([int(i[-3:]) for i in batch]))
y_data = to_categorical(y_data, num_classes=self.num_classes)
y_reg = np.zeros([self.batch_size])
return [X[:, 0, :, :], X[:, 1, :, :], X[:, 2, :, :], X[:, 3, :, :], X[:, 4, :, :], X[:, 5, :, :], X[:, 6, :, :],
X[:, 7, :, :], X[:, 8, :, :], X[:, 9, :, :], X[:, 10, :, :], X[:, 11, :, :], X[:, 12, :, :],
X[:, 13, :, :],
X[:, 14, :, :], X[:, 15, :, :], X, graph_conv, x_data_cnn], [y_data, y_reg]
def get_label_enc(self):
labels = [int(file[-3:]) for file in self.files]
return LabelEncoder().fit(labels)
def on_epoch_end(self):
"""
Shuffle data after each epoch
"""
if self.mode == 'train' or self.mode == 'train_set':
shuffle(self.files)
def _get_data_skeleton(self, list_IDs_temp):
# Initialization
X = np.empty((self.batch_size, self.step, self.dim))
# Generate data
for i, ID in enumerate(list_IDs_temp):
# Store sample
unpadded_file = np.load(self.path_skeleton + ID + '.npy')
[row, _] = unpadded_file.shape
if self.num_features == 3 and self.num_nodes == 25:
origin = unpadded_file[0, 3:6]
origin = np.tile(origin, (row, 50))
elif self.num_features == 2 and self.num_nodes == 14:
origin = unpadded_file[0, -2:]
origin = np.tile(origin, (row, 28))
unpadded_file -= origin
extra_frames = (len(unpadded_file) % self.step)
if extra_frames < (self.step / 2):
padded_file = unpadded_file[0:len(unpadded_file) - extra_frames, :]
else:
[row, col] = unpadded_file.shape
alpha = int(len(unpadded_file) / self.step) + 1
req_pad = np.zeros(((alpha * self.step) - row, col))
padded_file = np.vstack((unpadded_file, req_pad))
splitted_file = np.split(padded_file, self.step)
splitted_file = np.asarray(splitted_file)
row, col, width = splitted_file.shape
sampled_file = []
for k in range(0, self.step):
c = np.random.choice(col, 1)
sampled_file.append(splitted_file[k, c, :])
sampled_file = np.asarray(sampled_file)
X[i,] = np.squeeze(sampled_file)
X = X[:, :, 0:self.num_nodes * self.num_features]
X = np.reshape(X, [self.batch_size, self.step, self.num_nodes, self.num_features])
return X
def _get_data_cnn(self, batch):
x_train = [self._get_video(i) for i in batch]
x_train = np.array(x_train, np.float32)
x_train /= 127.5
x_train -= 1
return x_train
    def _get_video(self, vid_name):
        """Load ``self.stack_size`` frames for one video directory.

        Frame selection depends on how many images are on disk: long clips
        are sampled with a random start and ``self.stride`` spacing, short
        clips are looped until the stack is full, and medium clips use a
        random contiguous window.

        :param vid_name: directory name of the video under ``self.path_cnn``
        :return: list of ``self.stack_size`` BGR frames resized to 224x224
        """
        images = glob.glob(self.path_cnn + vid_name + "/*")
        images.sort()
        files = []
        if len(images) > (self.stack_size * self.stride):
            # Enough frames: strided window from a random start offset.
            start = randint(0, len(images) - self.stack_size * self.stride)
            files.extend([images[i] for i in range(start, (start + self.stack_size * self.stride), self.stride)])
        elif len(images) < self.stack_size:
            # Too few frames: loop the clip until the stack is full.
            files.extend(images)
            while len(files) < self.stack_size:
                files.extend(images)
            files = files[:self.stack_size]
        else:
            # Just enough frames: contiguous window from a random start.
            start = randint(0, len(images) - self.stack_size)
            files.extend([images[i] for i in range(start, (start + self.stack_size))])
        files.sort()
        arr = []
        for i in files:
            if os.path.isfile(i):
                arr.append(cv2.resize(cv2.imread(i), (224, 224)))
            else:
                # Missing file: repeat the previous frame.
                # NOTE(review): raises IndexError if the very first path is
                # missing - confirm inputs are always present on disk.
                arr.append(arr[-1])
        return arr
| [
"ductm21@vingroup.net"
] | ductm21@vingroup.net |
fd7b776d413ae841b4dd9e59749cbab4d90619df | 1d56e929916154812e76583337fd14ba91dd5b91 | /Seqstat.py | 4cd5ce8699d841e105c407cffbf058a387dff632 | [
"MIT"
] | permissive | Archieyoung/pySeqkit | 60a9565ff8d4989998ca83b234d4d85e527d9d18 | 27e575bea3542919c9baa2cb4155559157b44199 | refs/heads/master | 2020-03-18T03:29:33.529571 | 2018-04-17T13:30:19 | 2018-04-17T13:30:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,102 | py | #!/usr/bin/env python
"""
This script is used to statistics on a list of fastq files
copyright@fanjunpeng (jpfan@whu.edu.cn)
2018/1/15: init by fanjunpeng
"""
import argparse
from multiprocessing import Pool
from FastqReader import open_fastq
from FastaReader import open_fasta
def get_length(filename, index, min_len):
    """Collect the lengths of records in *filename* that are >= *min_len*.

    :param filename: path to a FastA/Q file (optionally gzip compressed)
    :param index: progress label such as "3/10", used only for logging
    :param min_len: minimum record length to keep
    :return: list of record lengths (empty for unknown formats)
    """
    print("[%s] process %r" % (index, filename))
    parts = filename.split(".")
    fmt = ".".join(parts[-2:]) if filename.endswith(".gz") else parts[-1]
    fmt = fmt.lower()
    lengths = []
    if fmt in ("fastq", "fq", "fastq.gz", "fq.gz"):
        reader = open_fastq(filename)
    elif fmt in ("fasta", "fa", "fasta.gz", "fa.gz"):
        reader = open_fasta(filename)
    else:
        print("[%s] %r is not a valid seq format!" % (index, filename))
        return lengths
    for record in reader:
        if record.length >= min_len:
            lengths.append(record.length)
    return lengths
def N(number, lengths):
    """Compute the N-statistic (e.g. N50) of *lengths*.

    Walk the lengths from longest to shortest until the running total of
    bases reaches *number* percent of the overall total.

    :param number: percentage threshold in [0, 100]
    :param lengths: non-empty list of record lengths
    :return: (length at the threshold, records consumed, bases consumed)
    """
    assert lengths
    threshold = sum(lengths) * number / 100
    running_total = 0
    for count, length in enumerate(sorted(lengths, reverse=True), start=1):
        running_total += length
        if running_total >= threshold:
            break
    return length, count, running_total
def over(number, lengths):
    """Summarise the records whose length is at least *number*.

    :param number: length cut-off
    :param lengths: non-empty list of record lengths
    :return: (first length below the cut-off, or the smallest length when
              all qualify; count of records >= cut-off; total bases in them)
    """
    assert lengths
    kept_bases = 0
    kept_records = 0
    for length in sorted(lengths, reverse=True):
        if length < number:
            return length, kept_records, kept_bases
        kept_bases += length
        kept_records += 1
    return length, kept_records, kept_bases
def fofn2list(fofn):
    """Read a file-of-file-names and return the listed paths.

    Blank lines and lines starting with '#' are ignored.
    """
    paths = []
    with open(fofn) as handle:
        for raw in handle.readlines():
            path = raw.strip()
            if path and not path.startswith("#"):
                paths.append(path)
    return paths
def seq_stat(filenames, ngs=False, fofn=False, concurrent=1, min_len=0):
    """Print summary statistics for a set of FastA/Q files.

    :param filenames: FastA/Q file paths, or fofn files when *fofn* is True
    :param ngs: if True, skip the N10-N90 / length-distribution report
                (not meaningful for short NGS reads)
    :param fofn: treat *filenames* as files that list the real input paths
    :param concurrent: number of worker processes used to read the files
    :param min_len: ignore records shorter than this length
    :return: 1 when *ngs* is True, otherwise None; as a side effect prints
             the report and writes every record length to ``record.len``
    """
    # 1. get the lengths of each fastA/Q file (one worker task per file)
    if fofn:
        file_list = []
        for f in filenames:
            file_list += fofn2list(f)
    else:
        file_list = filenames
    pool = Pool(processes=concurrent)
    results = []
    for i in range(len(file_list)):
        filename = file_list[i]
        index = "%s/%s" % (i+1, len(file_list))
        results.append(pool.apply_async(get_length, (filename, index, min_len)))
    pool.close()
    pool.join()
    # merge the per-file length lists returned by the workers
    lengths = []
    for i, r in enumerate(results):
        print("[%s/%s] getting results of %r" % (i+1, len(results), file_list[i]))
        lengths += r.get()
    # sort lengths (longest first) for the N-statistics below
    lengths = sorted(lengths, reverse=True)
    # 2. get the common statistics
    # NOTE(review): empty input (no record >= min_len) raises
    # ZeroDivisionError / IndexError below - confirm inputs are non-empty.
    total_length = sum(lengths)
    reads_number = len(lengths)
    file_num = "{0:,}".format(len(file_list))
    average_length = "{0:,}".format(int(total_length / reads_number))
    longest = "{0:,}".format(lengths[0])
    _total_length = "{0:,}".format(total_length)
    reads_number = "{0:,}".format(reads_number)
    print("""
Statistics for all FastA/Q records
file number: \t{file_num}
record number: \t{reads_number}
sum of length: \t{_total_length}
average length:\t{average_length}
longest length:\t{longest}
""".format(**locals()))
    # 3. get the N10-N90 statistics
    # length: the N{i} value; number: number of reads which length >= N{i}
    # if the input file is ngs short reads, skip the following steps.
    if ngs:
        return 1
    print("Distribution of record length")
    print("%5s\t%15s\t%15s\t%10s" % ("Type", "Bases", "Count", "%Bases"))
    for i in [10, 20, 30, 40, 50, 60, 70, 80, 90]:
        read_length, read_number, read_length_sum = N(i, lengths)
        print("%5s\t%15s\t%15s\t%10.2f" % ("N%s" % i,
                                           "{0:,}".format(read_length),
                                           "{0:,}".format(read_number),
                                           100.0*read_length_sum/total_length))
    # length: the sum of record length which length >= i; number: the number of record which length >= i
    for i in [1, 5, 10, 20, 30, 40, 50, 60]:
        _, read_number, read_length_sum = over(i*1000, lengths)
        print("%5s\t%15s\t%15s\t%10.2f" % (">%skb" % i,
                                           "{0:,}".format(read_length_sum),
                                           "{0:,}".format(read_number),
                                           100.0*read_length_sum/total_length))
    # write out record length for plot
    with open("record.len", "w") as fh:
        fh.write("\n".join(map(str, lengths)))
def get_args():
    """Parse the command line.

    :return: argparse.Namespace with input, ngs, fofn, min_len, concurrent
    """
    args = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                   description="""
description:
    Statistics on FastA/Q files
author: fanjunpeng (jpfan@whu.edu.cn)
""")
    # "input" accepts one or more paths; shell globs expand to several paths.
    args.add_argument("input", metavar='FILEs', nargs="+", help="file paths, '*' is accepted")
    args.add_argument("-ngs", action="store_true", help="input fastq reads is short reads from ngs")
    args.add_argument("-f", "--fofn", action="store_true", help="input file contains file paths")
    args.add_argument("--min_len", type=int, metavar="INT", default=0, help="min length to statistics")
    args.add_argument("-c", "--concurrent", metavar='INT', type=int, default=1, help="number of concurrent process")
    return args.parse_args()
def main():
    """Entry point: parse the CLI arguments and run the statistics."""
    args = get_args()
    seq_stat(args.input, args.ngs, args.fofn, args.concurrent, args.min_len)
if __name__ == "__main__":
    main()
| [
"schooiboy@qq.com"
] | schooiboy@qq.com |
ff63ca733498c988042c4f9a7ee4df25ecdace7c | 1305723f2ab2297bc396a58e6542a77a8ed6de8a | /flashcards.py | ca2ad8b14c8de3262db11fe559763a999915a076 | [
"MIT"
] | permissive | sneakythumbs/flashcards | ff86d3fd2d9ce678be0e6475b66db99ee2ccced4 | b93e93981dc6529cb3c139278708e1027f730858 | refs/heads/master | 2021-01-22T09:47:40.544430 | 2015-04-29T19:51:17 | 2015-04-29T19:51:17 | 34,813,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | import sys
import fileinput
from pylab import *
# NOTE: this is Python 2 code (print statements, raw_input).
header = True   # True until the first (header) line has been handled
column = -1     # index of the language column chosen by the user
prompt = raw_input("Enter the filename(s) to be read:\n")
files = prompt.split(", ")
for line in fileinput.input(files):
    s = line.split("\t|\t")
    if header:
        # The first line lists the available languages; ask which to sort by.
        print"Select language to sort by:",
        for word in s[:-1]:
            print "[" + word + ",",
        print s[-1].strip("\r\n") + "]"
        language = raw_input("")
        i = 0
        for word in s:
            if language == word.strip("\r\n"):
                column = i
                break;
            i += 1
        if column == -1:
            print "Error: no such language"
            print "Exiting"
            sys.exit()
        header = False
        continue;
    if not header:
        # NOTE(review): every data line is skipped here, so the selected
        # column is never used - the flashcard loop looks unfinished.
#        print line[:-1]
        continue;
| [
"sneakythumbs@users.noreply.github.com"
] | sneakythumbs@users.noreply.github.com |
71f27e6f44fc1dfef7571b27982acccf33236218 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/ring/siren.py | 7f1b147471d271411715ee41520529c0afef4805 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 1,678 | py | """Component providing HA Siren support for Ring Chimes."""
import logging
from typing import Any
from ring_doorbell.const import CHIME_TEST_SOUND_KINDS, KIND_DING
from homeassistant.components.siren import ATTR_TONE, SirenEntity, SirenEntityFeature
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN
from .entity import RingEntityMixin
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Create the sirens for the Ring devices.

    One test-sound siren entity is added per Chime device discovered for
    this config entry.
    """
    devices = hass.data[DOMAIN][config_entry.entry_id]["devices"]
    # Build the entity list with a comprehension (ruff PERF401) instead of
    # a manual append loop.
    async_add_entities(
        [RingChimeSiren(config_entry, device) for device in devices["chimes"]]
    )
class RingChimeSiren(RingEntityMixin, SirenEntity):
    """Creates a siren to play the test chimes of a Chime device."""
    # The tones exposed to HA are the Chime's built-in test sounds.
    _attr_available_tones = CHIME_TEST_SOUND_KINDS
    _attr_supported_features = SirenEntityFeature.TURN_ON | SirenEntityFeature.TONES
    _attr_translation_key = "siren"
    def __init__(self, config_entry: ConfigEntry, device) -> None:
        """Initialize a Ring Chime siren."""
        super().__init__(config_entry.entry_id, device)
        # Entity class attributes
        self._attr_unique_id = f"{self._device.id}-siren"
    def turn_on(self, **kwargs: Any) -> None:
        """Play the test sound on a Ring Chime device."""
        # Fall back to the standard "ding" when no tone is requested.
        tone = kwargs.get(ATTR_TONE) or KIND_DING
        self._device.test_sound(kind=tone)
| [
"noreply@github.com"
] | noreply@github.com |
d20193ef0287422fa66d20f0836d9d60526f6e1a | 61e300590cde2b01ec7834275087e7c408e8afec | /carts/views.py | 0b392b011ce9b329661e2a8ea25b9a1f000aaadd | [] | no_license | nabhanda/shopwithus | 7c4f450765b0b63018f20ece8cc933cf58c132d8 | 37f178f39090d4f7c25320aecf9455dbce657670 | refs/heads/master | 2020-03-23T22:47:41.514598 | 2018-10-12T13:07:26 | 2018-10-12T13:07:26 | 142,197,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,781 | py | from django.http import JsonResponse
from django.shortcuts import render, redirect
from accounts.forms import LoginForm, GuestForm
from accounts.models import GuestEmail
from orders.models import Order
from product.models import Product
from billing.models import BillingProfile
from addresses.forms import AddressForm
from addresses.models import Address
from .models import Cart
def cart_detail_api_view(request):
    """Return the current cart as JSON: its products, subtotal and total."""
    cart_obj, _created = Cart.objects.new_or_get(request)
    serialized = [
        {
            "id": item.id,
            "url": item.get_absolute_url(),
            "name": item.name,
            "price": item.price,
        }
        for item in cart_obj.product.all()
    ]
    payload = {"product": serialized, "subtotal": cart_obj.subtotal, "total": cart_obj.total}
    return JsonResponse(payload)
def cart_home(request):
    """Render the cart page for this session's (possibly newly created) cart."""
    cart_obj, new_obj = Cart.objects.new_or_get(request)
    return render(request, "carts/home.html", {"cart":cart_obj})
def cart_update(request):
    """Toggle a product in/out of the session cart.

    Reads ``product_id`` from POST, removes the product from the cart if it
    is already there or adds it otherwise, refreshes the cached item count
    in the session, and answers JSON for AJAX requests or redirects back
    to the cart page.
    """
    product_id = request.POST.get('product_id')
    if product_id is not None:
        try:
            product_obj = Product.objects.get(id=product_id)
        except Product.DoesNotExist:
            print("May be Product is no more listed. Try after some time")
            return redirect("cart")
        cart_obj, new_obj = Cart.objects.new_or_get(request)
        if product_obj in cart_obj.product.all():
            cart_obj.product.remove(product_obj)
            added = False
        else:
            cart_obj.product.add(product_obj)
            added = True
        request.session['cart_items'] = cart_obj.product.count()
        if request.is_ajax():  # Asynchronous JavaScript And XML / JSON
            print("Ajax request")
            json_data = {
                "added": added,
                "removed": not added,
                # BUG FIX: the related manager is ``product`` (singular, as
                # used above); ``cart_obj.products`` raised AttributeError
                # on every AJAX call.
                "cartItemCount": cart_obj.product.count()
            }
            return JsonResponse(json_data, status=200)  # HttpResponse
    return redirect("cart")
def checkout_home(request):
    """Checkout page: resolve cart, billing profile, addresses and order.

    GET renders the checkout page with login/guest/address forms.
    POST marks the order paid when ``check_done()`` passes and redirects
    to the success page.
    """
    cart_obj, cart_created = Cart.objects.new_or_get(request)
    order_obj = None
    # A freshly created or empty cart cannot be checked out.
    if cart_created or cart_obj.product.count() == 0:
        return redirect("cart")
    login_form = LoginForm()
    guest_form = GuestForm()
    address_form = AddressForm()
    # Address ids stashed in the session by the address views.
    billing_address_id = request.session.get("billing_address_id", None)
    shipping_address_id = request.session.get("shipping_address_id", None)
    billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)
    print(billing_profile)
    address_qs = None
    if billing_profile is not None:
        if request.user.is_authenticated:
            address_qs = Address.objects.filter(billing_profile=billing_profile)
            print(address_qs)
        order_obj, order_obj_created = Order.objects.new_or_get(billing_profile, cart_obj)
        # Attach any addresses chosen earlier, then clear them from the
        # session so they are only applied once.
        if shipping_address_id:
            order_obj.shipping_address = Address.objects.get(id=shipping_address_id)
            del request.session["shipping_address_id"]
        if billing_address_id:
            order_obj.billing_address = Address.objects.get(id=billing_address_id)
            del request.session["billing_address_id"]
        if billing_address_id or shipping_address_id:
            order_obj.save()
    if request.method == "POST":
        # NOTE(review): order_obj is still None here when no billing profile
        # exists - check_done() would raise AttributeError; confirm the POST
        # path always has a billing profile.
        'some check that order is done'
        is_done=order_obj.check_done()
        if is_done:
            order_obj.mark_paid()
            request.session['cart_items'] = 0
            del request.session['cart_id']
            return redirect("success")
    context = {
        "object": order_obj,
        "billing_profile": billing_profile,
        "login_form": login_form,
        "guest_form": guest_form,
        "address_form": address_form,
        "address_qs": address_qs
        # "billing_address_form": billing_address_form
    }
    return render(request, "carts/checkout.html", context)
# def checkout_done_view(request):
# # billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)
# # cart_obj, cart_created = Cart.objects.new_or_get(request)
# # if request.user.is_authenticated:
# # order_obj, order_obj_created = Order.objects.new_or_get(billing_profile, cart_obj)
# # context = {
# # "object": order_obj,
# # }
# # return render(request, "carts/checkout-done.html", context)
# return render(request, "carts/checkout-done.html", {})
def checkout_done_view(request):
    """Render the "checkout done" page for the current order.

    Looks up (or creates) the billing profile and cart for this request and
    resolves the matching order, which is handed to the template.
    """
    # The original fetched the billing profile twice and computed an unused
    # Address queryset; both removed.
    billing_profile, _profile_created = BillingProfile.objects.new_or_get(request)
    cart_obj, _cart_created = Cart.objects.new_or_get(request)
    # Initialise so the template still renders (object=None) when there is
    # no billing profile; previously this path raised NameError.
    order_obj = None
    if billing_profile is not None:
        order_obj, _order_created = Order.objects.new_or_get(billing_profile, cart_obj)
    context = {
        "object": order_obj,
    }
    return render(request, "carts/checkout-done.html", context)
##############Working Cart view but do not have user association###############
# def cart_home(request):
# #del request.session['cart_id']
# cart_id = request.session.get("cart_id", None)
# if cart_id is None: #and isinstance(cart_id, int):
# cart_obj = Cart.objects.create(user=None)
# request.session['cart_id'] = cart_obj.id
# print('New Cart created')
# else:
# print('Cart id exists')
# print(cart_id)
# #cart_obj = Cart.objects.get(id=cart_id)
# return render(request,"carts/home.html", {}) | [
"nabhanda@oradev.oraclecorp.com"
] | nabhanda@oradev.oraclecorp.com |
7332bb72184308f1c755b9859e825e727dc18a52 | 2205363ea412aae36aa2c5f8b7d608cd8a158a03 | /Personal_Blog/Pb/Pb/settings.py | d3f8de8c66bb1455f934b84f6bb3190cd42b086b | [] | no_license | Akanksha2403/HacktoberFest2020 | 986ef7ba5595679085e5159d35c5a30d9e91ebc5 | 789762e3a4a3ad23fd2c1ca3b6cc3bc8f39eed82 | refs/heads/master | 2023-08-28T04:25:07.466359 | 2021-10-20T10:16:46 | 2021-10-20T10:16:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,674 | py | """
Django settings for Pb project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-0r=r9##5pcrhvdnxxoblg4uj7#@^n$z3t%+a7&t@1_4ebckoxo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Blog.apps.BlogConfig',
'chat.apps.ChatConfig',
'resume',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Pb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR/'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Pb.wsgi.application'
ASGI_APPLICATION = 'chatty.asgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"lit2020026@gmail.com"
] | lit2020026@gmail.com |
38ffeefe71c4acb79a5a838efeb26765465afa7f | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/bin/iptest | 62ac6036fc5f261d69ea933bb91ed9bee7ded5ca | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 269 | #!/Users/yenchiehliao/Dropbox/bisCrawler/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from IPython.testing.iptestcontroller import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com | |
f9d21162737f40168c323f56d4a303bf6211ce0c | c6d89d2507efe02ead1802649a769e021795b2b6 | /categories/context_processors.py | cb9c2687489bdc34c2746a89d05b11c34a37b16c | [] | no_license | ikonitas/pleasuresallmine | b671b05d2f13428973cc19d39e58d0b56d1914f0 | 875e6067a202be801a9b1fddb27c4d313fd133f4 | refs/heads/master | 2021-05-29T19:50:39.812885 | 2014-11-27T21:22:22 | 2014-11-27T21:22:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | # coding=utf-8
from django.core.cache import cache
from models import Category
def list_categories(request):
categories = cache.get('list_categories')
if not categories:
categories = Category.objects.filter(
is_active=True).order_by('sort_order')
cache.set('list_categories', categories, 60)
return {'list_categories': categories}
| [
"ikonitas@gmail.com"
] | ikonitas@gmail.com |
29511c1e8bcf903725d957b2e420756cc1908ad8 | 29d7ba390d4b6046666f783e682ea248108ea900 | /cbagent/__main__.py | 09669d3c032cc65f432c457e3e7024f81dfcc2cd | [
"Apache-2.0"
] | permissive | pavel-paulau/cbagent | 5f289fbaf08b997b55d270944d67f716ec1a127a | f905974d663e0320e55a00076d292cbf489e53d9 | refs/heads/master | 2020-04-26T09:55:43.761203 | 2014-07-31T12:41:18 | 2014-07-31T12:41:18 | 13,084,444 | 2 | 1 | null | 2014-06-19T02:15:22 | 2013-09-25T04:52:21 | Python | UTF-8 | Python | false | false | 2,380 | py | import sys
from optparse import OptionParser
from cbagent.collectors.active_tasks import ActiveTasks
from cbagent.collectors.iostat import IO
from cbagent.collectors.latency import Latency
from cbagent.collectors.observe import ObserveLatency
from cbagent.collectors.net import Net
from cbagent.collectors.ns_server import NSServer
from cbagent.collectors.ps import PS
from cbagent.collectors.sync_gateway import SyncGateway
from cbagent.collectors.xdcr_lag import XdcrLag
from cbagent.settings import Settings
def main():
parser = OptionParser(prog="cbagent")
parser.add_option("--at", action="store_true", dest="active_tasks",
help="Active tasks")
parser.add_option("--io", action="store_true", dest="iostat",
help="iostat")
parser.add_option("--l", action="store_true", dest="latency",
help="Latency")
parser.add_option("--o", action="store_true", dest="observe",
help="Observe latency")
parser.add_option("--n", action="store_true", dest="net",
help="Net")
parser.add_option("--ns", action="store_true", dest="ns_server",
help="ns_server")
parser.add_option("--ps", action="store_true", dest="ps",
help="ps CPU, RSS and VSIZE")
parser.add_option("--sg", action="store_true", dest="sync_gateway",
help="Sync Gateway")
parser.add_option("--x", action="store_true", dest="xdcr_lag",
help="XDCR lag")
options, args = parser.parse_args()
if not args:
sys.exit("No configuration provided")
if options.active_tasks:
collector = ActiveTasks
elif options.iostat:
collector = IO
elif options.latency:
collector = Latency
elif options.observe:
collector = ObserveLatency
elif options.net:
collector = Net
elif options.ns_server:
collector = NSServer
elif options.ps:
collector = PS
elif options.sync_gateway:
collector = SyncGateway
elif options.xdcr_lag:
collector = XdcrLag
else:
sys.exit("No collector selected")
settings = Settings()
settings.read_cfg(args[0])
collector = collector(settings)
collector.update_metadata()
collector.collect()
if __name__ == '__main__':
main()
| [
"pavel.paulau@gmail.com"
] | pavel.paulau@gmail.com |
e170f688e59520f390ab02a6b3e1b52b161b747b | 66bfac516682bc8c3c804a5b7414cfc8b3440186 | /leads/apps/leads/serializers.py | 5e9d220555f6c26071a166a7b386b109ee1a7eb8 | [] | no_license | Izaiasjun1Dev/leads | 190d1bf01f1809c34cb53582e0f1020c3d704b58 | 22a209b43fd0eb60218deba731c9bf189ea9568a | refs/heads/master | 2023-04-05T15:15:39.834194 | 2021-03-30T11:35:36 | 2021-03-30T11:35:36 | 352,966,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from rest_framework import serializers
from .models import Lead
# Serializador lead
class LeadSerializer(serializers.ModelSerializer):
class Meta:
model = Lead
fields = '__all__'
| [
"solucaoprogramer@gmail.com"
] | solucaoprogramer@gmail.com |
9f1290f7f6f31c7b9141d7a8df5c908e18069708 | 28d74e08aeacf3703ab739849164e3f35f9af2a3 | /CUNY_ML_Course_CSC74020/Logistic Regression & Regularization/log_reg_book.py | c71385fcc0dbe1c06b9fd36e1807ff989c985224 | [] | no_license | wfeng66/Machine-Learning | 5ef87b2149bdf50e7516e808c86176b8c7aeb900 | de85d596ee97e7234d338ce47ee2addca56ccb89 | refs/heads/master | 2022-07-01T09:11:02.803824 | 2022-06-25T00:35:24 | 2022-06-25T00:35:24 | 173,125,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,788 | py | '''
create a logistic regression from scratch
including feature transform, PCA
apply Breast cancer data set
'''
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
class log_reg():
def __init__(self, lr=0.25, epoch=100000, batch_size=4, fold=5, init='random') -> None:
self.init = init
self.epoch = epoch
self.lr = lr
self.batch_size = batch_size
self.folding = fold
self.loss_in = []
self.loss_out = []
def fold(self):
self.X_val, self.Y_val= self.X[400:], self.Y[400:]
self.X, self.Y = self.X[:400], self.Y[:400]
# self.X_val, self.Y_val= self.X[50:], self.Y[50:]
# self.X, self.Y = self.X[:50], self.Y[:50]
def init_w(self):
if self.init == 'zero':
self.W = np.zeros((self.n, ))
elif self.init == 'random':
self.W = np.random.normal(0, 0.01, (self.n, ))
else:
print('The initilizer only accept "zero" or "random"!')
return
def sigmoid(self, z):
e = np.exp(z)
return e/(1+e)
def z(self, x):
# zz = np.dot(x, self.W)
# print("z:", x.shape, self.W.shape, zz.shape)
return np.dot(x, self.W)
def gradient(self, X, Y):
# numerator = np.dot(Y, X)
# print('W: ', self.W.shape)
# print('X.T: ', X.T.shape)
# print('Y: ', Y.shape)
#tmp = np.dot(X.T, self.W)
# e_exp = np.dot(Y, np.dot(X, self.W))
# return -np.sum(numerator/(1+np.exp(e_exp)))/X.shape[0]
# prev_y = self.sigmoid(self.z(X))
# print('prev:', prev_y.shape, 'Y:', Y.shape, 'X.T:', X.T.shape)
# return (1/self.m)*np.dot(X.T, self.sigmoid(self.z(X))-Y)+2*self.wd*self.W
# return -(1/self.m)*np.dot(X.T, prev_y*(1-prev_y)*(Y-prev_y))
# return (-1/X.shape[0])*np.sum(((Y.reshape(-1, 1)*X)/(1 + np.exp(Y*self.z(X))).reshape((-1,1))) , axis=0) +2*self.wd*self.W
# print((Y.reshape(-1, 1)*X).shape)
# print((1 + np.exp(Y*self.z(X))).shape)
g = np.zeros((X.shape[1], ))
for i in range(len(X)):
a = 0
for j in range(len(X[i])):
a = a + self.W[j]*X[i][j]
g = g + ((Y[i]*X[i])/(1+np.exp(Y[i]*a)))
return -g/(X.shape[0])
def update(self, X, Y):
# delata = self.gradient(X, Y)
# print('W:', self.W.shape, 'delata:', delata.shape, delata)
self.W = self.W - self.lr*self.gradient(X, Y)
def loss(self, x, y):
# print(self.W.T.shape)
# print(x.shape)
# l = np.sum(np.log(1+np.exp(-y.dot(np.dot(x, self.W)))))/x.shape[0]
# print(x.shape, y.shape)
# z = self.z(x)
# print(z[:3])
# prev_y = self.sigmoid(self.z(x))
# print(prev_y[:5])
# print(y[:5])
# l = -(1/x.shape[0])*np.sum(y*np.log(self.sigmoid(self.z(x)))+\
# (1-y)*np.log(1-self.sigmoid(self.z(x)))) + self.wd*np.dot(self.W, self.W.T)
# print(l)
# return (1/x.shape[0])*np.sum(np.log(1 + np.exp(-y*self.z(x))) , axis=0) + self.wd*np.dot(self.W, self.W.T)
loss = 0
for i in range(len(x)):
a = 0
for j in range(len(x[i])):
a = a + self.W[j]*x[i][j]
loss = loss + np.log(1+np.exp(-y[i]*a))
return loss/x.shape[0]
def fit(self, X, Y, wd=0.0):
"""Train the model
Args:
X (np.array): training data set
Y (np.array): label
wd (float): the weight decay supermeter - lambda. Defaults to 0.
Returns:
weights(np.array): the final weights
loss_in(list): the list of history loss in sample
loss_out(list): the list of history loss out of sample
"""
self.wd = wd
self.X = np.concatenate((np.ones((X.shape[0], 1)), X), 1)
self.Y = Y
self.fold()
self.m, self.n = self.X.shape
self.init_w()
for _ in range(self.epoch):
if _ == 10000:
self.lr = self.lr/10
elif _== 50000:
self.lr = self.lr/10
for i in range((self.m-1)//self.batch_size+1):
xb = self.X[i*self.batch_size:(i+1)*self.batch_size]
yb = self.Y[i*self.batch_size:(i+1)*self.batch_size]
self.update(xb, yb)
# print('in: ', end=' ')
l_in = self.loss(self.X, self.Y)
# print('out: ', end=' ')
l_out = self.loss(self.X_val, self.Y_val)
self.loss_in.append(l_in)
self.loss_out.append(l_out)
print('In: {}, Out: {}...'.format(l_in, l_out))
return self.W, self.loss_in, self.loss_out
def load_bc():
import sklearn.datasets as ds
bc = ds.load_breast_cancer()
X_bc = bc.data
y_bc = bc.target
# y_bc[y_bc==0] = -1 # convert 0 to -1 in target
return X_bc, y_bc
import pandas as pd
# df = pd.read_csv("G://temp/marks.txt", header=None)
# X_bc = np.array(df.iloc[:, :-1])
# y_bc = np.array(df.iloc[:, -1])
X_bc, y_bc = load_bc() # load data
def pca_tran(X, n):
pca = PCA(n_components=n)
pcaComponets = pca.fit_transform(X)
pca_arr = np.array(pcaComponets)
return pca_arr
lr = log_reg()
# X_bc = pca_tran(X_bc, 30)
scaler = StandardScaler()
X_bc = scaler.fit_transform(X_bc)
W, loss_in, loss_out = lr.fit(X_bc, y_bc, 0.002)
# import matplotlib.pyplot as plt
# x = range(0, 1001, 1)
# plt.plot(x, loss_in, label='loss_in')
# plt.plot(x, loss_out, label='loss_out')
# plt.legend(['train', 'val'])
# plt.show()
| [
"weicongf73@gmail.com"
] | weicongf73@gmail.com |
21df3f525aa8018d4988caa7395c9afa3a066060 | 18596ac4d79761a60e118c915378e0bb71bf0244 | /dangxuanENV/Scripts/dangxuan/apps/televisions/models.py | 9f2a88c225eb8c4170089c2a9ae0da4fea8f624f | [] | no_license | I-AM-DESPERADO/DjangoDemo | 06fdf5ecaa88a3b24d0368f95962a57c8ed85ff7 | 012edd11ee671211f52917d5b57dc480b5f7ad24 | refs/heads/master | 2021-09-07T11:21:55.586979 | 2018-02-22T06:20:26 | 2018-02-22T06:20:26 | 104,620,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | # _*_ encoding:utf-8 _*_
from __future__ import unicode_literals
from datetime import datetime
from django.db import models
class TelivisionProgram(models.Model):
television_title = models.CharField(verbose_name=u'电视节目名称', max_length=255)
note = models.CharField(verbose_name=u'描述', max_length=255,null=True)
class Meta:
verbose_name = u"电视节目信息"
verbose_name_plural = verbose_name
class TelevisionProgramContent(models.Model):
television_program_id = models.ForeignKey(TelivisionProgram,verbose_name=u'电视节目id')
thumbnails_url = models.URLField(verbose_name=u'缩略图URL', max_length=255)
video_url = models.URLField(verbose_name=u'视频URL', max_length=255)
video_introduction = models.CharField(verbose_name=u'视频介绍', max_length=255)
video_timestamp = models.TimeField(verbose_name=u'加入时间')
note = models.CharField(verbose_name=u'说明', max_length=255,null=True)
class Meta:
verbose_name = u'电视节目实体信息'
verbose_name_plural = verbose_name
| [
"1569890780@qq.com"
] | 1569890780@qq.com |
181e07eb777edf892a8df8abc17d2ad592bd5481 | 7d8ad9e28a7c8bfe310e76eadaa866927d02cb63 | /backtesters/bollinger_momentum.py | 36f0ac71c1b724552a4363296ce3820bb1a468c1 | [
"MIT"
] | permissive | boochi046/crypto-price-analysis | 9b671abef2c39739d4e29de98427fc419e0119b9 | e9985479bb0e0128ce38cec48b870f3d1868b034 | refs/heads/master | 2020-04-14T02:36:58.276069 | 2018-12-19T15:51:40 | 2018-12-19T15:51:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,587 | py | '''Backtest Moving Average (MA) crossover strategies
'''
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import timedelta
import talib
from backtester import Backtester
class BollingerMomentum(Backtester):
'''
Momentum strategy based on bollinger bands
If you touch then upper band you go long until you cross the middle band
If you touch then lower band you go short until you cross the middle band
Parameters:
series: (Panda Series) a list of CLOSE prices by date
lookback (int) lookckback period for billinger
distance : (float) number of std deviations for the band
'''
def __init__(self, series, lookback=14, distance=2):
self._lookback = lookback
self._distance = distance
super(BollingerMomentum,self).__init__(series,long_only=False)
def __str__(self):
return "Bollinger Momentum Backtest (lookback=%d, distance=%0.1f, start=%s, end=%s)" % (
self._lookback, self._distance, str(self._start_date), str(self._end_date))
def plot(self, start_date=None, end_date=None, figsize=None):
'''Plot of prices and the the buy and sell points
Stratgies can add their own additional indicators
'''
sns.set_style("white")
fig, ax = plt.subplots(figsize=figsize)
fig.suptitle(self.__str__(), size=13)
Backtester.plot(self,start_date=start_date,end_date=end_date, ax=ax)
temp = self._df.loc[start_date:end_date]
# ax1.legend()
ax.plot(temp['lower'], color='silver', alpha=0.5, label = 'bollinger')
ax.plot(temp['middle'],"--", color='silver', alpha=0.3)
ax.plot(temp['upper'], color='silver', alpha=0.5, label = 'bollinger')
ax.fill_between(temp.index,temp['lower'],temp['upper'], alpha=0.2, color='silver')
ax.legend()
plt.tight_layout()
plt.show()
def _trade_logic(self):
'''Implements the trade logic in order to come up with
a set of stances
'''
self._df['upper'], self._df['middle'], self._df['lower'] = talib.BBANDS(self._df['last'],
timeperiod=self._lookback, nbdevup=self._distance,nbdevdn=self._distance)
current_stance = 0
stances = []
for index in np.arange(0,len(self._df)):
long_signal = False
short_signal = False
long_close = False
short_close = False
close = self._df['last'].iloc[index]
if close >= self._df['upper'].iloc[index]:
long_signal = True
if close < self._df['lower'].iloc[index]:
short_signal = True
if close <= self._df['middle'].iloc[index]:
long_close = True
if close >= self._df['middle'].iloc[index]:
short_close = True
if current_stance == 0:
if long_signal:
current_stance = 1
elif short_signal:
current_stance = -1
elif current_stance == 1:
if long_close:
current_stance = 0
if short_signal:
current_stance = -1
elif current_stance == -1:
if short_close:
current_stance = 0
if long_signal:
current_stance = 1
stances.append(current_stance)
self._df['stance'] = stances
| [
"daniel@blackhatmedia.com"
] | daniel@blackhatmedia.com |
8cd2cc4ef6bde6bb958a5160732122d1e4d5c2af | b46513de1a1fe8eadbd01518fc6b8067de277aee | /vdbm/dumb.py | 242e9f060e0925f6dc82f8bc9f9bc41d641c8dc1 | [] | no_license | vrthra/taint-demo | b4b83f28727341d1723df1157e8a8ac67fc69097 | 9eb50f214dc5178b27ba7e4945441b31091037f9 | refs/heads/master | 2021-08-30T23:21:00.371936 | 2017-12-19T20:16:05 | 2017-12-19T20:16:05 | 114,804,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,187 | py | """A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
import ast as _ast
import io
import os
import collections
import vdbm.taint
# Identity "sanitizer" for directory-file lines; wrapping it with
# vdbm.taint.sanitizer (below) makes the taint tracker treat its return
# value as cleansed.  It performs no actual validation.
def sanitize(src): return src

# Taint instrumentation for the demo: data read through `io` is a taint
# source, calls into `os` are sinks, and the module-local `eval` (used on
# directory-file lines in _Database._update) is a monitored sink.
for m in [io]: vdbm.taint.mark_sources(m)
sanitize = vdbm.taint.sanitizer(sanitize)
for m in [os]: vdbm.taint.mark_sinks(m)
eval = vdbm.taint.sink(eval)

__all__ = ["error", "open"]

# Values in the data file start at offsets aligned to this block size.
_BLOCKSIZE = 512

# Exception type raised for database errors (exported via __all__).
error = OSError
class _Database(collections.MutableMapping):
    """Mutable mapping backed by a two-file on-disk store.

    ``self._index`` maps bytes keys to ``(pos, siz)`` pairs locating each
    value inside the data file; the directory file persists that mapping
    as one ``repr``'d entry per line.
    """

    # The on-disk directory and data files can remain in mutually
    # inconsistent states for an arbitrarily long time (see comments
    # at the end of __setitem__).  This is only repaired when _commit()
    # gets called.  One place _commit() gets called is from __del__(),
    # and if that occurs at program shutdown time, module globals may
    # already have gotten rebound to None.  Since it's crucial that
    # _commit() finish successfully, we can't ignore shutdown races
    # here, and _commit() must not reference any globals.
    _os = os  # for _commit()
    _io = io  # for _commit()

    def __init__(self, filebasename, mode, flag='c'):
        self._mode = mode
        self._readonly = (flag == 'r')

        # The directory file is a text file.  Each line looks like
        #    "%r, (%d, %d)\n" % (key, pos, siz)
        # where key is the string key, pos is the offset into the dat
        # file of the associated value's first byte, and siz is the number
        # of bytes in the associated value.
        self._dirfile = filebasename + '.dir'

        # The data file is a binary file pointed into by the directory
        # file, and holds the values associated with keys.  Each value
        # begins at a _BLOCKSIZE-aligned byte offset, and is a raw
        # binary 8-bit string value.
        self._datfile = filebasename + '.dat'
        self._bakfile = filebasename + '.bak'

        # The index is an in-memory dict, mirroring the directory file.
        self._index = None  # maps keys to (pos, siz) pairs

        # Handle the creation
        self._create(flag)
        self._update()

    def _create(self, flag):
        if flag == 'n':
            for filename in (self._datfile, self._bakfile, self._dirfile):
                try:
                    # BUGFIX: this used the bare name `_os`, which is only a
                    # class attribute (the module imports `os`), so flag='n'
                    # raised NameError.  Go through self._os like _commit()
                    # and _chmod() do.
                    self._os.remove(filename)
                except OSError:
                    pass
        # Mod by Jack: create data file if needed
        try:
            f = io.open(self._datfile, 'r', encoding="Latin-1")
        except OSError:
            if flag not in ('c', 'n'):
                import warnings
                warnings.warn("The database file is missing, the "
                              "semantics of the 'c' flag will be used.",
                              DeprecationWarning, stacklevel=4)
            with io.open(self._datfile, 'w', encoding="Latin-1") as f:
                self._chmod(self._datfile)
        else:
            f.close()

    # Read directory file into the in-memory index dict.
    def _update(self):
        self._index = {}
        try:
            f = io.open(self._dirfile, 'r', encoding="Latin-1")
        except OSError:
            self._modified = not self._readonly
        else:
            self._modified = False
            with f:
                for line in f:
                    line = sanitize(line.rstrip())
                    # SECURITY NOTE: eval() executes arbitrary expressions
                    # from the index file; this module deliberately routes
                    # it through the taint tracker (see module top).  A
                    # hardened version would use _ast.literal_eval here.
                    key, pos_and_siz_pair = eval(line, globals(), locals())
                    key = key.encode('Latin-1')
                    self._index[key] = pos_and_siz_pair

    # Write the index dict to the directory file.  The original directory
    # file (if any) is renamed with a .bak extension first.  If a .bak
    # file currently exists, it's deleted.
    def _commit(self):
        # CAUTION:  It's vital that _commit() succeed, and _commit() can
        # be called from __del__().  Therefore we must never reference a
        # global in this routine.
        if self._index is None or not self._modified:
            return  # nothing to do

        try:
            self._os.unlink(self._bakfile)
        except OSError:
            pass

        try:
            self._os.rename(self._dirfile, self._bakfile)
        except OSError:
            pass

        with self._io.open(self._dirfile, 'w', encoding="Latin-1") as f:
            self._chmod(self._dirfile)
            for key, pos_and_siz_pair in self._index.items():
                # Use Latin-1 since it has no qualms with any value in any
                # position; UTF-8, though, does care sometimes.
                entry = "%r, %r\n" % (key.decode('Latin-1'), pos_and_siz_pair)
                f.write(entry)

    sync = _commit

    def _verify_open(self):
        if self._index is None:
            raise error('DBM object has already been closed')

    def __getitem__(self, key):
        if isinstance(key, str):
            key = key.encode('utf-8')
        self._verify_open()
        pos, siz = self._index[key]  # may raise KeyError
        with io.open(self._datfile, 'rb') as f:
            f.seek(pos)
            dat = f.read(siz)
        return dat

    # Append val to the data file, starting at a _BLOCKSIZE-aligned
    # offset.  The data file is first padded with NUL bytes (if needed)
    # to get to an aligned offset.  Return pair
    #     (starting offset of val, len(val))
    def _addval(self, val):
        with io.open(self._datfile, 'rb+') as f:
            f.seek(0, 2)
            pos = int(f.tell())
            npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
            f.write(b'\0'*(npos-pos))
            pos = npos
            f.write(val)
        return (pos, len(val))

    # Write val to the data file, starting at offset pos.  The caller
    # is responsible for ensuring that there's enough room starting at
    # pos to hold val, without overwriting some other value.  Return
    # pair (pos, len(val)).
    def _setval(self, pos, val):
        with io.open(self._datfile, 'rb+') as f:
            f.seek(pos)
            f.write(val)
        return (pos, len(val))

    # key is a new key whose associated value starts in the data file
    # at offset pos and with length siz.  Add an index record to
    # the in-memory index dict, and append one to the directory file.
    def _addkey(self, key, pos_and_siz_pair):
        self._index[key] = pos_and_siz_pair
        with io.open(self._dirfile, 'a', encoding="Latin-1") as f:
            self._chmod(self._dirfile)
            f.write("%r, %r\n" % (key.decode("Latin-1"), pos_and_siz_pair))

    def __setitem__(self, key, val):
        if self._readonly:
            import warnings
            warnings.warn('The database is opened for reading only',
                          DeprecationWarning, stacklevel=2)
        if isinstance(key, str):
            key = key.encode('utf-8')
        elif not isinstance(key, (bytes, bytearray)):
            raise TypeError("keys must be bytes or strings")
        if isinstance(val, str):
            val = val.encode('utf-8')
        elif not isinstance(val, (bytes, bytearray)):
            raise TypeError("values must be bytes or strings")
        self._verify_open()
        self._modified = True
        if key not in self._index:
            self._addkey(key, self._addval(val))
        else:
            # See whether the new value is small enough to fit in the
            # (padded) space currently occupied by the old value.
            pos, siz = self._index[key]
            oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
            if newblocks <= oldblocks:
                self._index[key] = self._setval(pos, val)
            else:
                # The new value doesn't fit in the (padded) space used
                # by the old value.  The blocks used by the old value are
                # forever lost.
                self._index[key] = self._addval(val)
        # Note that _index may be out of synch with the directory
        # file now:  _setval() and _addval() don't update the directory
        # file.  This also means that the on-disk directory and data
        # files are in a mutually inconsistent state, and they'll
        # remain that way until _commit() is called.  Note that this
        # is a disaster (for the database) if the program crashes
        # (so that _commit() never gets called).

    def __delitem__(self, key):
        if self._readonly:
            import warnings
            warnings.warn('The database is opened for reading only',
                          DeprecationWarning, stacklevel=2)
        if isinstance(key, str):
            key = key.encode('utf-8')
        self._verify_open()
        self._modified = True
        # The blocks used by the associated value are lost.
        del self._index[key]
        # XXX It's unclear why we do a _commit() here (the code always
        # XXX has, so I'm not changing it).  __setitem__ doesn't try to
        # XXX keep the directory file in synch.  Why should we?  Or
        # XXX why shouldn't __setitem__?
        self._commit()

    def keys(self):
        try:
            return list(self._index)
        except TypeError:
            raise error('DBM object has already been closed') from None

    def items(self):
        self._verify_open()
        return [(key, self[key]) for key in self._index.keys()]

    def __contains__(self, key):
        if isinstance(key, str):
            key = key.encode('utf-8')
        try:
            return key in self._index
        except TypeError:
            if self._index is None:
                raise error('DBM object has already been closed') from None
            else:
                raise

    def iterkeys(self):
        try:
            return iter(self._index)
        except TypeError:
            raise error('DBM object has already been closed') from None
    __iter__ = iterkeys

    def __len__(self):
        try:
            return len(self._index)
        except TypeError:
            raise error('DBM object has already been closed') from None

    def close(self):
        try:
            self._commit()
        finally:
            # Dropping every handle marks the object as closed; later
            # operations fail _verify_open().
            self._index = self._datfile = self._dirfile = self._bakfile = None

    __del__ = close

    def _chmod(self, file):
        # Some platforms (historically) lack os.chmod.
        if hasattr(self._os, 'chmod'):
            self._os.chmod(file, self._mode)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
def open(file, flag='c', mode=0o666):
    """Open the database file, filename, and return corresponding object.

    The flag argument, used to control how the database is opened in the
    other DBM implementations, supports only the semantics of 'c' and 'n'
    values.  Other values will default to the semantics of 'c' value:
    the database will always opened for update and will be created if it
    does not exist.

    The optional mode argument is the UNIX mode of the file, used only
    when the database has to be created.  It defaults to octal code
    0o666 (and will be modified by the prevailing umask).
    """
    # Apply the process umask to the requested mode.  Querying the umask
    # requires temporarily resetting it; platforms without os.umask keep
    # the mode as given.
    try:
        current_umask = os.umask(0)
        os.umask(current_umask)
    except AttributeError:
        pass
    else:
        mode &= ~current_umask

    if flag not in ('r', 'w', 'c', 'n'):
        import warnings
        warnings.warn("Flag must be one of 'r', 'w', 'c', or 'n'",
                      DeprecationWarning, stacklevel=2)

    return _Database(file, mode, flag=flag)
| [
"rahul@gopinath.org"
] | rahul@gopinath.org |
412d94ad7ce1d9d7b92b6406d8aa5350f3f77fe9 | 2e79b8f2e4cc5ea10789de787f787fdc56137993 | /leetcode/438.找到字符串中所有字母异味词.py | e1f674a878c118e39a1c3fa3bfafdb8b51fc9564 | [] | no_license | wangye707/Test | d486ccb0947f6a83662a73fb56554260d1445c30 | 0d5fb8ea7da79d7d168d99f7158c8aa5757a1d35 | refs/heads/master | 2020-06-04T05:48:46.132054 | 2020-04-28T14:53:30 | 2020-04-28T14:53:30 | 191,894,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | #!D:/workplace/python
# -*- coding: utf-8 -*-
# @File : 438.找到字符串中所有字母异味词.py
# @Author: WangYe
# @Date : 2019/9/25
# @Software: PyCharm
def findAnagrams(s, p):
    """Return the start indices of all substrings of s that are
    anagrams of p, via a fixed-width sliding window of letter counts.

    :type s: str
    :type p: str
    :rtype: List[int]
    """
    window_len = len(p)

    # Letter frequencies required by p.
    target = {}
    for ch in p:
        target[ch] = target.get(ch, 0) + 1

    matches = []
    window = {}
    for start in range(len(s) - window_len + 1):
        if start == 0:
            # Populate counts for the very first window.
            for ch in s[:window_len]:
                window[ch] = window.get(ch, 0) + 1
        else:
            # Slide right by one: drop the character leaving on the left,
            # removing its entry entirely when the count hits zero so the
            # dict comparison below stays exact.
            leaving = s[start - 1]
            if window[leaving] > 1:
                window[leaving] -= 1
            else:
                del window[leaving]
            # ...and count the character entering on the right.
            entering = s[start + window_len - 1]
            window[entering] = window.get(entering, 0) + 1
        if window == target:
            matches.append(start)
    return matches
# Ad-hoc smoke test: expected output is [0, 6].
s = "cbaebabacd"
p = "abc"
print(findAnagrams(s,p))
"1119744330@qq.com"
] | 1119744330@qq.com |
dbdee5eef5cc684505c11a610e47fa8b0e522e5e | d6529fb62de5f5b41ba46c49bb73dd8ae9bb4194 | /tools/conversion/Converter.py | 35dbd564005fc1bdbda16dd1f0e3123683adc3a9 | [
"MIT"
] | permissive | Blackdevil132/aes | 45fb3d6b880fc9697c96c095c2283f78518fab67 | 4c1226ff2946ea9da9cea43e441c9cfda12ca972 | refs/heads/master | 2021-01-01T19:54:22.495680 | 2020-01-05T12:15:21 | 2020-01-05T12:15:21 | 98,718,294 | 0 | 0 | null | 2017-07-29T21:59:23 | 2017-07-29T08:05:55 | Python | UTF-8 | Python | false | false | 4,796 | py | from typing import List, Sequence
from tools.datatypes.State import State
from tools.datatypes.Byte import Byte
# Lookup table: lowercase hexadecimal digit -> integer value 0..15.
hexcToInt = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
             '8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15, }

# Lookup table: lowercase hexadecimal digit -> its 4-bit binary string.
hexcToBin = {'0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100', '5': '0101', '6': '0110', '7': '0111',
             '8': '1000', '9': '1001', 'a': '1010', 'b': '1011', 'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'}
def hexToBin(hex: str) -> str:
    """
    Translate a hexadecimal string into binary. Every hex digit expands
    to exactly 4 bits, so the result has 4 * len(hex) characters.
    :param hex: hexadecimal value (lowercase digits)
    :return: corresponding binary value
    """
    return "".join(hexcToBin[digit] for digit in hex)
def hexToInt(hex: str) -> int:
    """
    Convert a (typically 2 digit) hexadecimal value into an integer
    using Horner's scheme over the digits, most significant first.
    :param hex: hexadecimal value (lowercase digits)
    :return: corresponding integer value (base 10)
    """
    integer = 0
    for digit in hex:
        integer = integer * 16 + hexcToInt[digit]
    return integer
def intToHex(integer: int) -> str:
    """
    Convert an integer (base 10) to lowercase hexadecimal, left-padded
    with '0' so the result has at least 2 digits.
    :param integer: value with base 10
    :return: corresponding hexadecimal value
    """
    digits = hex(integer)[2:]
    return digits.zfill(2)
def intToBin(integer: int) -> str:
    """
    Convert an integer with base 10 to its binary string (no padding).
    :param integer: value with base 10
    :return: corresponding binary value
    """
    if not integer:
        return "0"
    bits = []
    while integer:
        bits.append(str(integer % 2))
        integer = int(integer / 2)
    return "".join(reversed(bits))
def binToInt(binary: str) -> int:
    """
    Convert a binary string to an int with base 10 (empty string -> 0),
    folding the digits most significant first (Horner's scheme).
    :param binary: value with base 2
    :return: corresponding value with base 10
    """
    integer = 0
    for bit in binary:
        integer = integer * 2 + int(bit)
    return integer
def binToHex(binary: str) -> str:
    """
    Convert from base 2 to base 16 by going through an integer.
    :param binary: value with base 2
    :return: value with base 16 (at least 2 digits)
    """
    value = binToInt(binary)
    return intToHex(value)
def bytearrayToState(a: Sequence[Byte]) -> State:
    """
    Arrange 16 Bytes column by column into a 4x4 State:
    element a[r + 4*c] lands at state[r][c].
    :param a: iterable object with 16 Bytes
    :return: State
    """
    state = State()
    for idx in range(16):
        state[idx % 4][idx // 4] = a[idx]
    return state
def stateToBytearray(state: State) -> List[Byte]:
    """
    Flatten a 4x4 State column by column into its List of 16 Bytes:
    output position r + 4*c receives state[r][c].
    :param state: State
    :return: List of 16 Bytes
    """
    return [state[idx % 4][idx // 4] for idx in range(16)]
def binToBytearray(bits: str) -> List[Byte]:
    """
    Split a binary sequence into 8-bit chunks and build one Byte per
    chunk (each Byte receives a list of its 8 bit characters).
    :param bits: binary sequence with length divisible by 8
    :return: List of Bytes
    """
    count = len(bits) // 8
    return [Byte(list(bits[8 * n:8 * n + 8])) for n in range(count)]
def hexToBytearray(hex: str) -> List[Byte]:
    """
    Split a hexadecimal sequence into 2-digit chunks and build one Byte
    per chunk.
    :param hex: hex. sequence with length divisible by 2
    :return: corresponding List of Bytes
    """
    count = len(hex) // 2
    return [Byte(hex[2 * n:2 * n + 2]) for n in range(count)]
def bytearrayToHex(ba: Sequence[Byte]) -> str:
    """
    Convert a List of Bytes into a hexadecimal str by concatenating
    characters 1..2 of each Byte's str() representation.
    :param ba: List of Bytes
    :return: hex in str
    """
    return "".join(str(b)[1:3] for b in ba)
def textToHex(text: str) -> str:
    """
    Convert ASCII-encoded text into a hexadecimal sequence, two hex
    digits per character (via ord()).
    :param text: text
    :return: hexadecimal str
    """
    return "".join(intToHex(ord(ch)) for ch in text)
def hexToText(hex: str) -> str:
    """Decode a hexadecimal sequence back into text, one character per
    2-digit chunk (inverse of textToHex)."""
    chars = []
    for pos in range(0, len(hex), 2):
        code = hexToInt(hex[pos:pos + 2])
        chars.append(chr(code))
    return "".join(chars)
def bytearrayToText(ba: Sequence[Byte]) -> str:
    """Decode a List of Bytes into text by going through its hex form."""
    return hexToText(bytearrayToHex(ba))
def textToBytearray(text: str) -> List[Byte]:
    """Encode text into a List of Bytes by going through its hex form."""
    return hexToBytearray(textToHex(text))
| [
"larsbengel@uni-koblenz.de"
] | larsbengel@uni-koblenz.de |
df3b159fcb643903c95d367dee0173bd363b6814 | 685dbb4416caf20f3e7e9c701df35088cb308d98 | /pen_blink_detection/haar_face&eyedetection.py | b30f660c73698ceaca1c7994ef464cb5163f5785 | [] | no_license | cseho67/2020_senior_project | e4469afa9ee488c5d406c5bd037c20542133ff9d | a653c9751315cfb5d6a8713ea46e13cae964aa67 | refs/heads/master | 2023-03-23T09:55:31.328411 | 2021-03-12T11:28:22 | 2021-03-12T11:28:22 | 297,645,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | import numpy as np
import cv2
def detectAndDisplay(frame):
    """Detect faces and eyes in a BGR image and display the result.

    Faces are found with the module-level `face_cascade` on a
    histogram-equalised grayscale copy; eyes are searched only inside
    each detected face region.  Faces are drawn as red rectangles, eyes
    as green circles, and the annotated frame is shown in a window
    titled 'HAAR face'.

    Fix: removed the unused `center` local the original computed per face.
    """
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame_gray = cv2.equalizeHist(frame_gray)

    faces = face_cascade.detectMultiScale(frame_gray)
    for (x, y, w, h) in faces:
        # Red box around the face (OpenCV uses BGR colour order).
        frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 4)
        # Restrict the eye search to the face region of the gray image.
        faceGOT = frame_gray[y:y + h, x:x + w]
        eyes = eyes_cascade.detectMultiScale(faceGOT)
        for (x2, y2, w2, h2) in eyes:
            # Eye coordinates are relative to the face ROI, so offset them.
            eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)
            radius = int(round((w2 + h2) * 0.25))
            frame = cv2.circle(frame, eye_center, radius, (0, 255, 0), 4)

    cv2.imshow('HAAR face', frame)
# Load the test image and report its dimensions.
img = cv2.imread("./image/image.jpg")

print( "width : {} pixels".format(img.shape[1]))
print( "height : {} pixels".format(img.shape[0]))
print( "channels : {} ".format(img.shape[2]))

(height , width) = img.shape[:2]

cv2.imshow("original Image", img)

# Paths to the pretrained Haar cascade definitions.
face_cascade_name = "./harr/haarcascade_frontalface_alt.xml"
eyes_cascade_name = "./harr/haarcascade_eye_tree_eyeglasses.xml"
face_cascade = cv2.CascadeClassifier()
eyes_cascade = cv2.CascadeClassifier()

# Load the cascade classifiers; bail out if either file is missing.
if not face_cascade.load(cv2.samples.findFile(face_cascade_name)):
    print("no file x ")
    exit(0)
if not eyes_cascade.load(cv2.samples.findFile(eyes_cascade_name)):
    print("no file x ")
    exit(0)

# Run detection, then wait for a key press before closing the windows.
detectAndDisplay(img)

cv2.waitKey(0)
cv2.destroyAllWindows()
"48603413+cseho67@users.noreply.github.com"
] | 48603413+cseho67@users.noreply.github.com |
5946ceacec2ae4a3be3c157b5b5d605114c3ed7c | e0f0379c84b858959950fa25c19a80c453ec05b5 | /main.py | 8a3fea249dc3b5421d5da29f208a66712c538d0b | [] | no_license | fuwalab/popular-noun | 26e87362f58dae0b2447cd1e8580115d075c0bc6 | 0d4964938ea8a4a2a9c946fe60de7abab0ae7911 | refs/heads/master | 2020-04-14T23:44:53.671033 | 2019-01-08T14:49:04 | 2019-01-08T14:49:04 | 164,214,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | #!/usr/bin/env python
from scraping import Scraping
from datetime import datetime
from lib.analyze import Analyze
from joblib import Parallel, delayed
def main():
    """Scrape each configured provider, store the raw pages, then
    extract and persist the nouns found in the scraped text."""
    providers = [
        'naver',
    ]

    # Save the scraped content of every provider into the table.
    # The original hard-coded 'naver' and left `providers` unused;
    # iterating the list is identical for the current configuration
    # while honouring future additions.
    for provider in providers:
        Scraping.run(Scraping(), provider)

    # Extract nouns from the scraping results and save them to the table.
    Analyze.save_words(Analyze())
if __name__ == '__main__':
    # Log wall-clock start/end times around the scraping run.
    print('start: ' + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    main()
    print('end: ' + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
| [
"ryo.tsugawa@gmail.com"
] | ryo.tsugawa@gmail.com |
ee2c0aca4610558fbd930ddc73708712cf37de3f | 7260b155bef0a6ea3454ea1a8d05c945db98b57a | /scripts/topic.py | 784af7b326871561334aab1e389d3250935cd990 | [] | no_license | freddie1129/web-enron-project | a0ad2a9e394daa8b3d31b052d15d60cb3e6591a0 | cdbfbecf69d4af0972bc3ffb5e5547357175acdf | refs/heads/master | 2020-03-23T12:29:48.304308 | 2018-11-12T14:51:11 | 2018-11-12T14:51:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,366 | py | #import spacy
#spacy.load('en')
from spacy.lang.en import English
parser = English()
def tokenize(text):
    """Tokenize text with the spaCy English parser, normalising tokens
    for LDA: whitespace tokens are dropped, URL-like tokens collapse to
    'URL', @-mentions collapse to 'SCREEN_NAME', everything else is
    lowercased."""
    normalised = []
    for token in parser(text):
        if token.orth_.isspace():
            continue
        if token.like_url:
            normalised.append('URL')
        elif token.orth_.startswith('@'):
            normalised.append('SCREEN_NAME')
        else:
            normalised.append(token.lower_)
    return normalised
import nltk
from nltk.corpus import wordnet as wn
def get_lemma(word):
    """Reduce word to its WordNet base form; fall back to the word
    itself when WordNet has no morphological analysis for it."""
    lemma = wn.morphy(word)
    return word if lemma is None else lemma
from nltk.stem.wordnet import WordNetLemmatizer
def get_lemma2(word):
    """Lemmatize word with NLTK's WordNetLemmatizer (defaults to noun POS)."""
    lemmatizer = WordNetLemmatizer()
    return lemmatizer.lemmatize(word)
# Quick sanity check comparing the two lemmatizers on sample words.
for w in ['dogs', 'ran', 'discouraged']:
    print(w, get_lemma(w), get_lemma2(w))

# English stopword set used to filter tokens before topic modelling.
en_stop = set(nltk.corpus.stopwords.words('english'))
def prepare_text_for_lda(text):
    """Full preprocessing pipeline for one document: tokenize, keep only
    tokens longer than 4 characters, drop English stopwords, lemmatize."""
    kept = [tok for tok in tokenize(text) if len(tok) > 4 and tok not in en_stop]
    return [get_lemma(tok) for tok in kept]
import random
def run():
    """Build an LDA topic model with gensim and print its top topics.

    Side effects: writes corpus.pkl, dictionary.gensim and model5.gensim
    to the current directory.
    """
    text_data = []
    with open('./2.csv') as f:
        for line in f:
            tokens = prepare_text_for_lda(line)
            # Randomly sample ~1% of the preprocessed lines.
            if random.random() > .99:
                print(tokens)
                text_data.append(tokens)

    # NOTE(review): this reset discards everything collected from 2.csv
    # above, so the model is trained on the single raw_text document
    # only -- confirm whether that is intentional.
    text_data = []
    raw_text = """This rustic lamb casserole is full of flavour, especially if made ahead, and the lamb is meltingly tender. Harissa is a chilli paste with quite a kick; rose harissa, which I prefer to use, is sweeter and less fiery due to the addition of rose petals. I don’t like my food too spicy, so this dish is mild, but if you prefer it hot just add more harissa and good luck!"""
    tokens = prepare_text_for_lda(raw_text)
    text_data.append(tokens)

    import gensim
    from gensim import corpora
    # Map each token to an id, then represent documents as bags of words.
    dictionary = corpora.Dictionary(text_data)
    corpus = [dictionary.doc2bow(text) for text in text_data]

    import pickle
    pickle.dump(corpus, open('corpus.pkl', 'wb'))
    dictionary.save('dictionary.gensim')

    NUM_TOPICS = 5
    ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=NUM_TOPICS, id2word=dictionary, passes=15)
    ldamodel.save('model5.gensim')
    topics = ldamodel.print_topics(num_words=4)
    for topic in topics:
        print(topic)
| [
"freddiechenchen@gmail.com"
] | freddiechenchen@gmail.com |
cc8e6f027c3f9a654091243124118655bba7ed21 | 0da2927420dec6003c7e745422ac708f2df6a129 | /FormatowanieTekstu.py | 19999ca8c82b8da164285161491f573cd05e3e6b | [] | no_license | KamilJantos/Python_I | a37bafc0ded945dd4fd2f303fe51ad0ee870fb0c | 660563015f01171c92a740b5b6437f09160a3030 | refs/heads/master | 2022-12-25T19:30:34.394224 | 2020-10-12T15:42:08 | 2020-10-12T15:42:08 | 303,437,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py | # formatowanie znane z Pythona 2.x
wyznanie = "Lubię %s" % "Pythona"
print(wyznanie)
wonsz = "Python"
print("Lubię %sa" % wonsz)
print("Lubię %s oraz %sa" % ("Pythona", wonsz))
# %s oznacza, że w miejsce tego znacznika będzie podstawiany ciąg tekstowy
# %i - to liczba całkowita
# %f - liczba rzeczywista lub inaczej zmiennoprzecinkowa
# %x lub #X - liczba całkowita zapisana w formie szesnastkowej
print("Używamy wersji Python %i" % 3)
print("A dokładniej Python %f" % 3.5)
print("Chociaż lepiej to zapisać jako Python %.1f" % 3.5)
print("A kolejną glówną wersją Pythona może być wersja %.4f" % 3.6666)
print("A może będzie to wersja %.1f ?" % 3.6666)
print("A może jednak %.f ?" % 3.6666)
wersja = 4
print("A %i w systemie szesnastkowym to %X" % (wersja, wersja))
print("A %i * %i szesnastkowo daje %X" % (wersja, wersja, wersja*wersja))
# Chociaż możliwości przy korzystaniu z mechanizmów powyżej są spore,
# to i kilka wad się również znajdzie. Trzeba pilnować zarówno liczby argumentów jak
# i ich kolejności. Konieczne jest powielanie tej samej zmiennej jeżeli kilka
# razy jest wykorzystywana w formatowanym ciągu. Spójrzmy na inne możliwości.
print("Lubię %(jezyk)s" % {"jezyk": "Pythona"})
print("Lubię %(jezyk)s a czy Ty lubisz %(jezyk)s ?" % {"jezyk": "Pythona"})
# wadą jest dość duża ilość dodatkowego kodu do napisania, ale nazwy zmiennych
# w ciągu pozwalają na ich szybką identyfikację i wielokrotne wykorzystanie w
# dowolnej kolejności
# poniżej kolejny sposób
print("Lubię język {1} oraz {0}".format("Java", "Python"))
# w nowej wersji języka Python możliwe jest również odwoływanie się do elementów
#kolekcji lub pól klasy
class Osoba:
def __init__(self, imie, nazwisko):
self.imie = imie
self.nazwisko = nazwisko
kr = Osoba("Jan", "Nowak")
print("Tą osobą jest {0.imie} {0.nazwisko}".format(kr))
| [
"kamil.jantos7@gmail.com"
] | kamil.jantos7@gmail.com |
bd879dda1376bcb95331072006ffddb2e26bf6e4 | e0c176d11c7582158f1ab9722b3d1e71761f6b1a | /src/eqc/zholkovskij.py | 59e5d6d7f9eeb80d339ecabc602302f6803570c7 | [
"MIT"
] | permissive | robbje/eis | 93a199cfa52460707a84c037c7cb9ea0ad27732a | a7e0cac9e00e57f605cd9d9536d385a3f632e506 | refs/heads/master | 2021-01-18T22:24:42.178465 | 2016-07-11T08:55:55 | 2016-07-11T08:55:55 | 38,840,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | import numpy as np
# Implements parts of the analytical transmission line models found in
# "Irreversible Thermodynamics and Impedance Spectroscopy of Multilayer Membranes" - E. Zholkovskij
# http://dx.doi.org/10.1006/jcis.1995.1034
def tanh(x):
    """Overflow-safe hyperbolic tangent.

    np.tanh can overflow for arguments with a large real part (notably
    the complex arguments produced at high frequency); tanh saturates
    there, so the limit +/-1 is returned based on the sign of Re(x).

    Fixes over the original: the numpy error state is only raised
    *inside* this call and restored afterwards via np.errstate (the old
    code left it globally forced to 'warn'), and only FloatingPointError
    is caught instead of a bare `except:`.
    """
    with np.errstate(all='raise'):
        try:
            return np.tanh(x)
        except FloatingPointError:
            return 1.0 if np.real(x) > 0.0 else -1.0
def Zr(w, wk):
    """Complex dimensionless frequency ratio sqrt(w / (2*wk)) * (1 + i)
    for a layer with characteristic frequency wk (PEP 8: a named def
    instead of a lambda assignment; call signature unchanged)."""
    return np.complex128(np.sqrt(0.5 * w / wk) * (1 + 1j))


def Zd(w, g, t, r):
    """Per-layer denominator term g * t * (1 - t) * r / tanh(r), using
    the overflow-safe tanh defined above."""
    return np.complex128(g * t * (1 - t) * r / tanh(r))
def Bilayer(w, p):
    """Impedance of a two-layer membrane at angular frequency w.

    p packs one (g, t, w0) triple per layer: p[0:3] for the first layer
    and p[3:6] for the second.
    """
    g1, t1, w1 = p[0:3]
    g2, t2, w2 = p[3:6]
    numerator = (t2 - t1) ** 2
    denominator = Zd(w, g1, t1, Zr(w, w1)) + Zd(w, g2, t2, Zr(w, w2))
    return numerator / denominator
def trilambda(w, p):
    """Coupling coefficient between the two bilayer sub-systems of a
    three-layer membrane; p packs one (g, t, w0) triple per layer.

    Cleanup over the original: the unused r1 = Zr(w, w1) computation is
    removed and the repeated Zd(w, g2, t2, r2) term is evaluated once.
    """
    g1, t1 = p[0], p[1]
    g2, t2, w2 = p[3:6]
    g3, t3, w3 = p[6:9]
    r2 = Zr(w, w2)
    r3 = Zr(w, w3)
    # NOTE(review): the layer-1 term is evaluated with r2 (not Zr(w, w1)),
    # exactly as in the original code -- confirm against the paper.
    zd2 = Zd(w, g2, t2, r2)
    nom = g2 * t2 * (1 - t2) * r2 ** 2
    denom = (Zd(w, g1, t1, r2) + zd2) * (zd2 + Zd(w, g3, t3, r3)) * (np.sinh(r2) ** 2)
    result = nom / denom
    # Degenerate parameters (e.g. t2 in {0, 1}) yield 0/0 -> NaN; clamp to 0.
    if np.isnan(np.real(result)):
        result = 0.0
    return result
def Trilayer(w, p):
    """Impedance of a three-layer membrane: combines the impedances of
    the two adjacent bilayers with the coupling coefficient lambda_123.

    Cleanup over the original: the (g, t, w0) unpacking was dead code
    (every computation below consumes slices of p directly) and has been
    removed.
    """
    Z12 = Bilayer(w, p[0:6])
    Z23 = Bilayer(w, p[3:9])
    h123 = trilambda(w, p)
    nom = Z12 + Z23 + 2 * h123 * np.sqrt(Z12 * Z23)
    denom = 1 - h123 ** 2
    return nom / denom
def Quadlayer(w, p):
    """Impedance of a four-layer membrane: combines the three adjacent
    bilayer impedances with the two overlapping three-layer coupling
    coefficients lambda_123 and lambda_234.

    Cleanup over the original: the (g, t, w0) unpacking and the four
    r1..r4 = Zr(...) computations were never used (Zr is pure) and have
    been removed.
    """
    Z12 = Bilayer(w, p[0:6])
    Z23 = Bilayer(w, p[3:9])
    Z34 = Bilayer(w, p[6:12])
    h123 = trilambda(w, p[0:9])
    h234 = trilambda(w, p[3:12])
    nom = (1 - h234 ** 2) * Z12 + (1 - h123 ** 2) * Z34 + Z23
    nom += 2 * h123 * np.sqrt(Z12 * Z23) + 2 * h234 * np.sqrt(Z23 * Z34)
    nom += 2 * h123 * h234 * np.sqrt(Z12 * Z34)
    denom = 1 - (h123 ** 2 + h234 ** 2)
    return nom / denom
| [
"robert.femmer@avt.rwth-aachen.de"
] | robert.femmer@avt.rwth-aachen.de |
1a5b1416d35a4f87113659da2f108d396f184d69 | ab5a01f0fef7f3d824aa2adead9d3e5ed4e48037 | /UtilitySplit/venv/bin/flask | 444bb9f13ad9e69a8a712bcde7a829a81d8f3a93 | [] | no_license | bob7479/UtilitySplit | 41ee0dc94a10dfcd6ab46a336709df82da046051 | 3837984b13ffbd55b5a327ea95d33748d500fb96 | refs/heads/master | 2020-08-03T18:06:44.517823 | 2016-12-23T04:10:24 | 2016-12-23T04:10:24 | 73,541,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | #!/Users/macbook/Documents/CalHacks/flaskr/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"macbook@airbears2-10-142-157-15.airbears2.1918.berkeley.edu"
] | macbook@airbears2-10-142-157-15.airbears2.1918.berkeley.edu | |
c623380ca8277769f08041e14cc66374a1963eb7 | 5be7afab3f57b7b5365053700386c01bad7031e6 | /quotes.toscrape.com/1.2.quote_web_scraping/spiders/quotes_spider.py | 16b47d33a8206b7bb7caf819229b34ef62e264fb | [] | no_license | enji-coder/SCRAPY-PROJECTS | c0c76e1ef8697320a0cb9b3fa9155a158574a5c1 | bd65e6f3cf83912bc082ef39aba702db6cc4465c | refs/heads/main | 2023-06-20T19:11:36.764847 | 2021-08-04T04:39:08 | 2021-08-04T04:39:08 | 386,542,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | import scrapy
class ExampleSpider(scrapy.Spider):
    """Spider for quotes.toscrape.com: yields the text, author and tag
    list of every quote (first page only — no pagination follow-up)."""
    name = 'quotes'
    allowed_domains = ['example.com']
    start_urls = ['http://quotes.toscrape.com']

    def parse(self, response):
        # Each quote sits in its own <div class="quote"> element.
        for quote in response.css('div.quote'):
            yield {
                '--->> desc': quote.css('span.text::text').extract(),
                'author': quote.css('.author::text').extract(),
                'tag': quote.css('div.tags a::text').extract(),
            }
| [
"47570231+enji-coder@users.noreply.github.com"
] | 47570231+enji-coder@users.noreply.github.com |
e6ea0a18c418751b3458be9dd1196e1a7f5514d0 | 2d13b3206b04d663eed9c5cfe7b6d273abaab33e | /2.Algorithm/pycharm/SW Academy/20200309/harvest.py | 89098f8eaff5f7281c33299f947b60d69d741907 | [] | no_license | hdp0545/TIL | 0ba5378274f0076cd2b029581b292785a77207da | 6d6e5e54373bd71606823e97b3a5fb2d63a2784e | refs/heads/master | 2023-05-24T12:37:33.690750 | 2023-05-19T06:57:49 | 2023-05-19T06:57:49 | 235,004,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | for test_case in range(1, int(input())+1):
N = int(input())
matrix = [list(map(int, [n for n in input()])) for _ in range(N)]
result = 0
c = N // 2
for i in range(N):
di = (N//2) - abs(i - (N//2))
result += sum(matrix[i][c-di:c+di+1])
print('#{} {}'.format(test_case, result)) | [
"hdp0545@gmail.com"
] | hdp0545@gmail.com |
1fa53956af9d567b5bb6cde0572f8a7cb11d736f | 70121257e52e0fd2f0895414fcee3c991737443a | /python_recipes/tfpreprocess_cifar.py | 33aaef3fdca4998831ffa5306a3bf25f080ae646 | [] | no_license | OlgaBelitskaya/cookbooks | 2e54208bb5e5157814deea6ff71cd7ce5b1e4972 | 216dde3e5617203371ed4c4bb7d9e8391640c588 | refs/heads/master | 2021-07-11T15:56:44.923442 | 2021-03-25T08:38:46 | 2021-03-25T08:38:46 | 99,447,645 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | import warnings; warnings.filterwarnings('ignore')
import tensorflow as tf,numpy as np,pandas as pd
import tensorflow_datasets as tfds
from IPython.display import display,HTML
# Notebook-wide configuration: pandas display precision, float64 Keras
# backend, and silenced tensorflow_datasets progress bars.
pd.set_option('precision',3)
tf.keras.backend.set_floatx('float64')
tfds.disable_progress_bar()
# CIFAR-10 image edge length and tf.data pipeline sizes.
img_size=32
buffer_size,batch_size=10000,64
# Colours, Google font names and sizes used by dhtml() headings.
c1,c2,f1,f2,fs1,fs2=\
'#11ff66','#6611ff','Wallpoet','Orbitron',20,10
# Render `string` as a styled HTML <h1> heading in the notebook, using a
# Google font with the 3d-float effect.  Defaults come from the module
# constants c1/f1/fs1 above.
def dhtml(string,fontcolor=c1,font=f1,fontsize=fs1):
    display(HTML("""<style>
    @import 'https://fonts.googleapis.com/css?family="""\
    +font+"""&effect=3d-float';</style>
    <h1 class='font-effect-3d-float'
    style='font-family:"""+font+\
    """; color:"""+fontcolor+\
    """; font-size:"""+str(fontsize)+"""px;'>
    %s</h1>"""%string))
def load_cifar():
    """Download CIFAR-10 via tensorflow_datasets and return batched
    (train, valid, test) tf.data pipelines with images scaled to [0, 1].
    """
    cifar=tfds.builder('cifar10')
    cifar.download_and_prepare()
    ds=cifar.as_dataset(shuffle_files=False,
                        split=['train','test'])
    cifar_train,cifar_test=ds[0],ds[1]
    # Show the dataset's image/label feature specs as styled headings.
    dhtml(cifar.info.features['image'],c2,f2,fs2)
    dhtml(cifar.info.features['label'],c2,f2,fs2)
    # Normalise pixel values to [0, 1] floats and cast labels to int32.
    cifar_train=cifar_train.map(
        lambda item:(tf.cast(item['image'],tf.float32)/255.,
                     tf.cast(item['label'],tf.int32)))
    cifar_test=cifar_test.map(
        lambda item:(tf.cast(item['image'],tf.float32)/255.,
                     tf.cast(item['label'],tf.int32)))
    tf.random.set_seed(123)
    # Fixed shuffle so the validation split below is reproducible.
    cifar_train=cifar_train.shuffle(
        buffer_size=buffer_size,
        reshuffle_each_iteration=False)
    # First buffer_size shuffled examples form the validation set; the
    # remainder is the training set.  Note the test split is returned
    # unbatched.
    cifar_valid=cifar_train.take(buffer_size).batch(batch_size)
    cifar_train=cifar_train.skip(buffer_size).batch(batch_size)
    return cifar_train,cifar_valid,cifar_test
| [
"safuolga@gmail.com"
] | safuolga@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.