Please note, this is a STATIC archive of website www.tutorialspoint.com from 11 May 2019, cach3.com does not collect or store any user information, there is no "phishing" involved.
Tutorialspoint

Promedio python

#-*- coding: utf-8 -*-
#this line is required so that accents and ñ's can be written in this file

def Promedio( coll ):
    """Return the arithmetic mean (promedio) of the numbers in *coll*.

    Raises ZeroDivisionError when *coll* is empty, exactly like the
    original manual-accumulation version did.
    """
    # sum() performs the same left-to-right accumulation the old
    # index-based loop did, so results are bit-identical.
    return sum(coll) / len(coll)

def main():
    """Demo script: print a list of grades several ways, then compute
    their average twice (inline and via the Promedio helper above)."""
    calificaciones = [9.8, 8.8, 9.5, 8.5]

    print(calificaciones)

    # one grade per line, iterating directly over the values
    for nota in calificaciones:
        print(nota)

    print("********************************")

    # same listing again, this time going through the indices
    for idx in range(0, len(calificaciones)):
        print(calificaciones[idx])

    print("********************************\n")

    # average computed inline with a running total...
    acumulado = 0
    for nota in calificaciones:
        acumulado += nota

    promedio = acumulado / len(calificaciones)
    print("El promedio es: ", promedio)

    ##################################

    # ...and once more through the helper function
    promedio = Promedio(calificaciones)
    print("El promedio es: ", promedio)

if __name__ == '__main__' :
    main()

hola mundo

# Hello World program in Python (Python 2 "print" statement syntax)
    
print "Hello World!\n"

Execute Python Online

import os
import csv
from functions import *
import scipy.stats as st
import numpy as np

# Python 2 script: for each dataset, find the method with the best mean
# per-drug nDCG, then test every method against that best one with a
# paired t-test. Results go to a semicolon-separated CSV.
# NOTE(review): this block is nearly identical to the recall script below;
# candidates for a shared helper.
with open("results_sign_novelDTI_drugs_ndcg.csv", "w") as resFile:
        top_k_size = 10
        resFile.write("\n")
        resFile.write("dataset;method;nDCG;t_nDCG;p_nDCG\n" )
        dt = ["gpcr","ic", "nr", "e"]#
        met = [ "blmnii", "wnngip", "netlaprls", "cmf","brdti"] #,"knn_bprcasq"
        for dataset in dt: 
            resFile.write("\n")
            max_ndcg = 0    
            v_max_ndcg = np.ones(50)            
            for cp in met: #get maximal values for each evaluation metric throughout the evaluated methods
                # csv files opened 'rb' -- Python 2 csv module convention
                with open(os.path.join('output','newDTI',cp+'_'+dataset+'_'+str(top_k_size)+'_drug_stats.csv'), 'rb') as csvfile:
                    reader = csv.reader(csvfile, delimiter=';', quotechar='"')
                    res = np.array(list(reader) ) 
                    # column 3 holds per-drug ndcg; drop header and NaN rows
                    v_ndcg = res[:,3]                  
                    v_ndcg = [s for s in v_ndcg if s != "nan" and s != "ndcg"]
                    v_ndcg = [float(d) for d in v_ndcg]
                
                
                avg_ndcg = np.mean(v_ndcg)
                if avg_ndcg > max_ndcg:
                    max_ndcg = avg_ndcg
                    v_max_ndcg = v_ndcg[:]

                   
            for cp in met:  #calculate stat. sign. of other methods vs. the best one 
                with open(os.path.join('output','newDTI',cp+'_'+dataset+'_'+str(top_k_size)+'_drug_stats.csv'), 'rb') as csvfile:
                    reader = csv.reader(csvfile, delimiter=';', quotechar='"')
                    res = np.array(list(reader) ) 
                    cp_ndcg = res[:,3]                  
                    cp_ndcg = [s for s in cp_ndcg if s != "nan" and s != "ndcg"]
                    cp_ndcg = [float(d) for d in cp_ndcg]
            
                # paired t-test vs. the best method; p is halved for a
                # one-sided test (assumes equal-length, aligned vectors)
                x1, y1 = st.ttest_rel(v_max_ndcg, cp_ndcg)
                resFile.write(dataset+";"+cp+";%.6f;%.9f;%.9f\n" % (np.mean(cp_ndcg), x1, y1/2.0) )
                print dataset,cp, np.mean(cp_ndcg), x1, y1
            print ""
            


# Python 2 script: same procedure as the nDCG block above but for the
# per-drug recall metric (column 4 of the *_drug_stats.csv files) and a
# slightly different method list ("knn_bprcasq" instead of "brdti").
with open("results_sign_novelDTI_drugs_recall.csv", "w") as resFile:
        top_k_size = 10
        resFile.write("\n")
        resFile.write("dataset;method;recall;t_recall;p_recall\n" )
        dt = ["gpcr","ic", "nr", "e"]#
        met = [ "blmnii", "wnngip", "netlaprls", "cmf","knn_bprcasq"] #,"knn_bprcasq"
        for dataset in dt: 
            resFile.write("\n")
            max_ndcg = 0    
            v_max_ndcg = np.ones(50)            
            for cp in met: #get maximal values for each evaluation metric throughout the evaluated methods
                with open(os.path.join('output','newDTI',cp+'_'+dataset+'_'+str(top_k_size)+'_drug_stats.csv'), 'rb') as csvfile:
                    reader = csv.reader(csvfile, delimiter=';', quotechar='"')
                    res = np.array(list(reader) ) 
                    # column 4 holds per-drug recall; drop header and NaN rows
                    v_ndcg = res[:,4]                  
                    v_ndcg = [s for s in v_ndcg if s != "nan" and s != "recall"]
                    v_ndcg = [float(d) for d in v_ndcg]
                
                
                avg_ndcg = np.mean(v_ndcg)
                if avg_ndcg > max_ndcg:
                    max_ndcg = avg_ndcg
                    v_max_ndcg = v_ndcg[:]

                   
            for cp in met:  #calculate stat. sign. of other methods vs. the best one 
                with open(os.path.join('output','newDTI',cp+'_'+dataset+'_'+str(top_k_size)+'_drug_stats.csv'), 'rb') as csvfile:
                    reader = csv.reader(csvfile, delimiter=';', quotechar='"')
                    res = np.array(list(reader) ) 
                    cp_ndcg = res[:,4]                  
                    cp_ndcg = [s for s in cp_ndcg if s != "nan" and s != "recall"]
                    cp_ndcg = [float(d) for d in cp_ndcg]
            
                # one-sided paired t-test against the best method (p halved)
                x1, y1 = st.ttest_rel(v_max_ndcg, cp_ndcg)
                resFile.write(dataset+";"+cp+";%.6f;%.9f;%.9f\n" % (np.mean(cp_ndcg), x1, y1/2.0) )
                print dataset,cp, np.mean(cp_ndcg), x1, y1
            print ""
            

Execute Python Online

import os
import time
import numpy as np
import rank_metrics as rank
from collections import defaultdict


def load_data_from_file(dataset, folder):
    """Load the PyDTI-format matrices for *dataset* from *folder*.

    Returns (intMat, drugMat, targetMat) as float64 numpy arrays:
    drug-target interactions, drug-drug similarity, target-target
    similarity. Python 2 file iterators (``.next()``) are used.
    """
    def _read_matrix(path):
        # first line is a header row; each remaining line is
        # "<row-name> v1 v2 ..." -- drop the leading name column
        with open(path, "r") as inf:
            inf.next()
            return [line.strip("\n").split()[1:] for line in inf]

    int_array = _read_matrix(os.path.join(folder, dataset + "_admat_dgc.txt"))
    drug_sim = _read_matrix(os.path.join(folder, dataset + "_simmat_dc.txt"))    # drug similarity file
    target_sim = _read_matrix(os.path.join(folder, dataset + "_simmat_dg.txt"))  # target similarity file

    intMat = np.array(int_array, dtype=np.float64).T    # drug-target interaction matrix
    drugMat = np.array(drug_sim, dtype=np.float64)      # drug similarity matrix
    targetMat = np.array(target_sim, dtype=np.float64)  # target similarity matrix

    # the "metz" dataset marks some entries with -1; map them to 0
    if dataset == "metz":
        intMat[intMat == -1] = 0

    return intMat, drugMat, targetMat


def get_drugs_targets_names(dataset, folder):
    """Return (drug_names, target_names) for *dataset*.

    Drug names come from the header row of the adjacency file; target
    names are the first column of each subsequent row (Python 2 file
    iterator, ``.next()``).
    """
    path = os.path.join(folder, dataset + "_admat_dgc.txt")
    with open(path, "r") as inf:
        header = inf.next()
        drugs = header.strip("\n").split()
        targets = [row.strip("\n").split()[0] for row in inf]
    return drugs, targets


def cross_validation(intMat, seeds, cv=0, invert=0, num=10):
    """Build `num`-fold cross-validation splits for every seed.

    cv=0 holds out whole drugs (rows of intMat); cv=1 holds out single
    matrix entries. invert=1 transposes the mask and swaps the test
    indices (used for target-side prediction).
    Returns {seed: [(W, test_data, test_label), ...]} where W is a 0/1
    mask over intMat and test_data are (row, col) index pairs.
    """
    cv_data = defaultdict(list)
    for seed in seeds:
        num_drugs, num_targets = intMat.shape
        prng = np.random.RandomState(seed)
        if cv == 0:
            index = prng.permutation(num_drugs)
        if cv == 1:
            index = prng.permutation(intMat.size)
        # Python 2 integer division: `step` must be an int for the slice
        # arithmetic below (would be a float, and fail, on Python 3)
        step = index.size/num
        for i in xrange(num):
            if i < num-1:
                ii = index[i*step:(i+1)*step]
            else:
                # last fold absorbs the remainder
                ii = index[i*step:]
            if cv == 0:
                # all (drug, target) pairs of the held-out drugs
                test_data = np.array([[k, j] for k in ii for j in xrange(num_targets)], dtype=np.int32)
            elif cv == 1:
                # decode flat indices back into (drug, target) pairs
                test_data = np.array([[k/num_targets, k % num_targets] for k in ii], dtype=np.int32)
            x, y = test_data[:, 0], test_data[:, 1]
            test_label = intMat[x, y]
            # W masks out the test cells from the training matrix
            W = np.ones(intMat.shape)
            W[x, y] = 0
            
            #print test_data
            #print  np.column_stack((y,x))
            #print type(test_data), type(np.column_stack((y,x)))
            
            if invert:
                W_T = W.T
                test_data_T = np.column_stack((y,x))
                cv_data[seed].append((W_T, test_data_T, test_label))               
            else:    
                cv_data[seed].append((W, test_data, test_label))
    return cv_data


def train(model, cv_data, intMat, drugMat, targetMat):
    """Fit and evaluate *model* on every CV fold in *cv_data*.

    Returns five values: AUPR, AUC, nDCG and inverted-nDCG vectors (one
    entry per fold) plus a flat list of per-pair prediction rows.
    """
    aupr, auc, ndcg, ndcg_inv, results = [], [], [], [], []
    for seed in cv_data.keys():
        for W, test_data, test_label in cv_data[seed]:
            t = time.clock()
            model.fix_model(W, intMat, drugMat, targetMat, seed)
            aupr_val, auc_val, ndcg_val, ndcg_inv_val = model.evaluation(test_data, test_label)
            # Python 2: zip() returns a list, so list + zip(...) concatenates.
            # The ("","","","") row is a visual separator between folds.
            # Relies on model.evaluation() having set model.scores.
            results = results + [("","","","")] + zip(test_data[:,0],test_data[:,1],test_label,model.scores)
            
            print(aupr_val, auc_val, ndcg_val, ndcg_inv_val , time.clock()-t)
            aupr.append(aupr_val)
            auc.append(auc_val)
            ndcg.append(ndcg_val)
            ndcg_inv.append(ndcg_inv_val)
    return np.array(aupr, dtype=np.float64), np.array(auc, dtype=np.float64), np.array(ndcg, dtype=np.float64), np.array(ndcg_inv, dtype=np.float64), results


def svd_init(M, num_factors):
    """Initialise a pair of latent-factor matrices from a truncated SVD.

    Splits M ~= U0 . V0.T by distributing the square roots of the top
    `num_factors` singular values onto both factor matrices.
    """
    from scipy.linalg import svd
    U, s, V = svd(M, full_matrices=False)
    # indices of the largest singular values, in descending order
    top = np.argsort(s)[::-1][:num_factors]
    root = np.sqrt(np.diag(s[top]))
    left = np.dot(U[:, top], root)
    right = np.dot(root, V[top, :])
    return left, right.T


def mean_confidence_interval(data, confidence=0.95):
    """Return (mean, half_width) of the two-sided Student-t confidence
    interval for the mean of *data* at the given confidence level.
    """
    import scipy.stats
    a = 1.0*np.array(data)
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    # BUG FIX: use the public t.ppf quantile function instead of the
    # private t._ppf (numerically identical for the standard t
    # distribution, but _ppf is internal scipy API and may change).
    h = se * scipy.stats.t.ppf((1+confidence)/2., n-1)
    return m, h


def write_metric_vector_to_file(auc_vec, file_name):
    """Persist a 1-D metric vector to *file_name*, one value per line,
    formatted with six decimal places."""
    np.savetxt(file_name, auc_vec, fmt="%.6f")


def load_metric_vector(file_name):
    """Read a metric vector previously written one-value-per-line."""
    values = np.loadtxt(file_name, dtype=np.float64)
    return values


def plot_aupr(self, prec, rec, thr, name):
    """Plot a precision-recall curve and save it as <name>.png and <name>.pdf.

    `thr` is accepted for interface compatibility but not used.
    """
    import matplotlib.pyplot as plt
    plt.clf()
    plt.ioff()
    plt.plot(rec, prec, label='Precision-Recall')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('Precision-Recall ')
    plt.legend(loc="lower left")
    # BUG FIX: plt.figure() created a brand-new, empty figure, so the
    # saved files contained a blank canvas. Save the figure we actually
    # drew on instead.
    fig = plt.gcf()
    fig.savefig(name+'.png', bbox_inches='tight')
    fig.savefig(name+'.pdf', bbox_inches='tight')
       
def normalized_discounted_cummulative_gain(test_data,test_label, scores):
    """Mean per-user nDCG of *scores* against *test_label*.

    test_data[:, 0] identifies the user of each row; users whose labels
    are all (near-)zero are skipped since nDCG is undefined for them.
    """
    user_column = test_data[:,0]
    per_user_ndcg = []

    for user in np.unique(user_column):
        mask = np.in1d(user_column, [user])
        labels_u = test_label[mask].astype(float)
        scores_u = scores[mask].astype(float)
        # nDCG is calculated only for users with some positive examples
        if all(label <= 0.001 for label in labels_u):
            continue
        # order this user's labels by predicted score, best first
        order = scores_u.argsort()[::-1]
        ordered_labels = labels_u[order]
        ndcg_u = rank.ndcg_at_k(ordered_labels, ordered_labels.shape[0], 1)
        per_user_ndcg.append(ndcg_u)
    return np.mean(per_user_ndcg)
        

        

Execute Python Online

import os
import csv
import numpy as np
import rank_metrics as rank
from functions import *

class newDTIPrediction:
    """Evaluate predicted novel drug-target interactions (DTIs) against
    three external interaction databases: KEGG, DrugBank and Matador.

    Written for Python 2: csv files are opened in 'rb' mode and zip()
    returns lists (the database attributes are lists of (drug, target)
    tuples).
    """
    def __init__(self):        
        # KEGG file: column 0 = target, column 1 = drug (note the
        # reversed column order relative to the other two files);
        # row 0 is a header and is skipped via np.arange(1, n).
        with open(os.path.join('data','novelDrugsKEGG.csv'), 'rb') as csvfile:
            reader = csv.reader(csvfile, delimiter=';', quotechar='"')
            kg = np.array(list(reader))            
            t = kg[np.arange(1,kg.shape[0]),0]
            d = kg[np.arange(1,kg.shape[0]),1]
            self.kegg = zip(d,t)
        
        # DrugBank file: column 0 = drug, column 1 = target
        with open(os.path.join('data','novelDrugsDrugBank.csv'), 'rb') as csvfile:
            reader = csv.reader(csvfile, delimiter=';', quotechar='"')
            db = np.array(list(reader))            
            t = db[np.arange(1,db.shape[0]),1]
            d = db[np.arange(1,db.shape[0]),0]
            self.drugBank = zip(d,t)  
        
        # Matador file: column 0 = drug, column 1 = target
        with open(os.path.join('data','novelDrugsMatador.csv'), 'rb') as csvfile:
            reader = csv.reader(csvfile, delimiter=';', quotechar='"')
            mt = np.array(list(reader))            
            t = mt[np.arange(1,mt.shape[0]),1]
            d = mt[np.arange(1,mt.shape[0]),0]
            self.matador = zip(d,t)  
                   
                    
        #print(self.kegg)
        #print(self.drugBank)
        #print(self.matador)
        
    def analyse_new_known_interactions(self): 
        """Print, per dataset, how many external DTI pairs match the
        dataset's drug/target name lists (substring matching; target ids
        are normalised from "hsa..." to "hsa:...")."""
        self.allData =   self.kegg + self.drugBank + self.matador
        for dataset in ["gpcr","ic","nr","e"]:
            drug_names, target_names = get_drugs_targets_names(dataset, os.path.join("data", 'datasets'))
            new_interactions = set([s for s in self.allData if any(s[0] in d for d in drug_names) and any(s[1] in t.replace("hsa","hsa:") for t in target_names)])
            print(len(self.allData),len(drug_names),len(target_names))
            print(dataset)
            print(len(new_interactions))
            #print(new_interactions)
            
    def verify_novel_interactions(self, method, dataset, sz, predict_num, drug_names, target_names):    
        """Score the top-`predict_num` predictions per drug and per target
        against the external databases.

        sz: array of (drug_idx, target_idx, score) rows for all unknown
        cells. Writes four CSV reports under output/newDTI and returns
        (mean drug nDCG, mean target nDCG, mean drug recall, mean target
        recall), each computed with np.nanmean.
        """
        drugs = np.unique(sz[:,0])
        targets = np.unique(sz[:,1])
        self.drugs_ndcg = []
        self.targets_ndcg = []
        self.drugs_recall = []
        self.targets_recall = []
        
        # per-pair and per-entity report files
        new_dti_drugs = os.path.join('output/newDTI', "_".join([method, dataset,str(predict_num), "drugs_new_dti.csv"]))
        out_dti_d = open(new_dti_drugs, "w")
        out_dti_d.write(('drug;target;score;hit;kegg_hit;drugBank_hit;matador_hit\n'))

        new_dti_targets = os.path.join('output/newDTI', "_".join([method, dataset,str(predict_num), "targets_new_dti.csv"]))
        out_dti_t = open(new_dti_targets, "w")
        out_dti_t.write(('drug;target;score;hit;kegg_hit;drugBank_hit;matador_hit\n'))

        drug_stats = os.path.join('output/newDTI', "_".join([method, dataset,str(predict_num), "drug_stats.csv"]))
        outd = open(drug_stats, "w")
        outd.write(('drug;hits;possible_hits;ndcg;recall;total_known_targets\n'))

        target_stats = os.path.join('output/newDTI', "_".join([method, dataset,str(predict_num), "target_stats.csv"]))
        outt = open(target_stats, "w")
        outt.write(('target;hits;possible_hits;ndcg;recall;total_known_targets\n'))
        
        # external pairs restricted to entities present in this dataset
        self.allData = self.kegg + self.drugBank + self.matador
        self.dataset_new_interactions = set([s for s in self.allData if any(s[0] in d for d in drug_names) and any(s[1] in t.replace("hsa","hsa:") for t in target_names)])

        
        # per-drug: rank this drug's candidate targets by score and
        # evaluate the top predict_num of them
        for d in drugs:
            dti_score = sz[sz[:,0] == d]
            dti_score = dti_score[dti_score[:,2].argsort()[::-1]]
            pred_dti = [(drug_names[int(dti_score[i,0])], target_names[int(dti_score[i,1])], dti_score[i,2]) for i in np.arange(0, predict_num)]
            self.novel_prediction_analysis(pred_dti, drug_names[int(d)], "NA")                
            out_dti_d.write(''.join('%s;%s;%f;%i;%i;%i;%i \n' % x for x in self.eval_dti_pairs))
            outd.write(''.join('%s;%i;%i;%f;%f;%i \n' % self.eval_drugs))                        
        print("finish: per-drug evaluation, ndcg: %f recall: %f " % (np.nanmean(self.drugs_ndcg), np.nanmean(self.drugs_recall)) )  
       
        
        
        # per-target: symmetric evaluation, ranking drugs for each target
        for t in targets:
            dti_score = sz[sz[:,1] == t]            
            dti_score = dti_score[dti_score[:,2].argsort()[::-1]]
            pred_dti = [(drug_names[int(dti_score[i,0])], target_names[int(dti_score[i,1])], dti_score[i,2]) for i in np.arange(0, predict_num)]
            self.novel_prediction_analysis(pred_dti, "NA", target_names[int(t)])      
            out_dti_t.write(''.join('%s;%s;%f;%i;%i;%i;%i \n' % x for x in self.eval_dti_pairs))
            outt.write(''.join('%s;%i;%i;%f;%f;%i \n' % self.eval_targets))                        
        print("finish: per-target evaluation,  ndcg: %f recall: %f " % ( np.nanmean(self.targets_ndcg), np.nanmean(self.targets_recall)) ) 
        
        return (np.nanmean(self.drugs_ndcg),np.nanmean(self.targets_ndcg),np.nanmean(self.drugs_recall),np.nanmean(self.targets_recall))
    
    def novel_prediction_analysis(self,dti_pairs, drug, target):   
        """Check each predicted (drug, target, score) triple against the
        three databases and update the per-drug or per-target metric
        lists (exactly one of *drug* / *target* should be a real name,
        the other the sentinel string "NA").

        Side effects: sets self.eval_dti_pairs and self.eval_drugs or
        self.eval_targets, and appends to the ndcg/recall lists.
        """
        eval_dti_pairs = []
        hit_list = []
        for num in xrange(len(dti_pairs)):
            kg_hit, db_hit, mt_hit, hit = 0,0,0,0
            d, t, score = dti_pairs[num]
            # normalise the target id to the external "hsa:" notation
            dtp = (d,t.replace("hsa","hsa:"))
            #print(dtp)
            if dtp in self.kegg:
                kg_hit = 1
            if dtp in self.drugBank:
                db_hit = 1
            if dtp in self.matador:
                mt_hit = 1
            # a pair counts as a hit if any database confirms it
            hit = max(kg_hit,db_hit,mt_hit)
            eval_dti_pairs.append((d,t,score,hit,kg_hit,db_hit,mt_hit))
            hit_list.append(hit)
            
        self.eval_dti_pairs = eval_dti_pairs    
        if drug != "NA":
            # known external interactions of this drug within the dataset
            kt_set = set([dti for dti in self.dataset_new_interactions if dti[0] == drug])
            total_known_DTI = len(kt_set)
            ndcg_d = self.ndcg(hit_list,total_known_DTI)
            self.drugs_ndcg.append(ndcg_d)
            # recall is NaN when there is nothing that could be recalled
            recall_d = sum(hit_list)/float(total_known_DTI) if total_known_DTI > 0 else float('nan')
            self.drugs_recall.append(recall_d)
            self.eval_drugs = (drug,sum(hit_list),min(total_known_DTI,len(hit_list)),ndcg_d,recall_d,total_known_DTI)

        if target != "NA":
            target = target.replace("hsa","hsa:")
            kt_set = set([dti for dti in self.dataset_new_interactions if dti[1] == target])
            total_known_DTI = len(kt_set)
            ndcg_t = self.ndcg(hit_list,total_known_DTI)
            self.targets_ndcg.append(ndcg_t)
            recall_t = sum(hit_list)/float(total_known_DTI) if total_known_DTI > 0 else float('nan')
            self.targets_recall.append(recall_t)
            self.eval_targets = (target,sum(hit_list),min(total_known_DTI,len(hit_list)),ndcg_t,recall_t,total_known_DTI)
             
    def ndcg(self,hit_list,total_known_DTI):
        """nDCG of a binary hit list against the ideal ranking in which
        the (at most total_known_DTI) hits come first. NaN when nothing
        is known for the entity."""
        if total_known_DTI == 0:
            return float('nan')
        else:
            if total_known_DTI >= len(hit_list):
                ideal_list = [1 for number in xrange(len(hit_list))]
            else:
                ideal_list =[1 for number in xrange(total_known_DTI)]+[ 0  for number in xrange(len(hit_list)-total_known_DTI)]
            return rank.dcg_at_k(hit_list,len(hit_list),1)/rank.dcg_at_k(ideal_list,len(hit_list),1)


if __name__ == "__main__":
    # BUG FIX: this previously called new_pairs(), a name that does not
    # exist anywhere in this file and raised a NameError at runtime; the
    # class defined above is newDTIPrediction.
    d = newDTIPrediction()
    d.analyse_new_known_interactions()
    
    

Execute Python Online

'''
We base the CMF implementation on the one from PyDTI project, https://github.com/stephenliu0423/PyDTI, changes were made to the evaluation procedure


[1] X. Zheng, H. Ding, H. Mamitsuka, and S. Zhu, "Collaborative matrix factorization with multiple similarities for predicting drug-target interaction", KDD, 2013.

'''
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from functions import normalized_discounted_cummulative_gain

class CMF:
    """Collaborative Matrix Factorization for drug-target interaction
    prediction (Zheng et al., KDD 2013), based on the PyDTI project.

    Factorises intMat ~= U . V.T while regularising U and V towards the
    drug and target similarity matrices. Python 2 code (uses xrange).
    """

    def __init__(self, K=10, lambda_l=0.01, lambda_d=0.01, lambda_t=0.01, max_iter=100):
        # K: number of latent factors; lambda_l: L2 weight on U and V;
        # lambda_d / lambda_t: similarity-alignment weights for drugs /
        # targets; max_iter: cap on ALS iterations.
        self.K = K
        self.lambda_l = lambda_l
        self.lambda_d = lambda_d
        self.lambda_t = lambda_t
        self.max_iter = max_iter

    def fix_model(self, W, intMat, drugMat, targetMat, seed):
        """Fit U and V by alternating least squares on the W-masked
        interaction matrix; stops early when the relative loss change
        drops below 1e-6."""
        self.num_drugs, self.num_targets = intMat.shape
        self.drugMat, self.targetMat = drugMat, targetMat
        x, y = np.where(W > 0)
        self.train_drugs = set(x.tolist())
        self.train_targets = set(y.tolist())
        # factor init: N(0, 1/K) entries, reproducible when seed is given
        if seed is None:
            self.U = np.sqrt(1/float(self.K))*np.random.normal(size=(self.num_drugs, self.K))
            self.V = np.sqrt(1/float(self.K))*np.random.normal(size=(self.num_targets, self.K))
        else:
            prng = np.random.RandomState(seed)
            self.U = np.sqrt(1/float(self.K))*prng.normal(size=(self.num_drugs, self.K))
            self.V = np.sqrt(1/float(self.K))*prng.normal(size=(self.num_targets, self.K))
        self.ones = np.identity(self.K)
        last_loss = self.compute_loss(W, intMat, drugMat, targetMat)
        WR = W*intMat
        for t in xrange(self.max_iter):
            # alternate: solve for U with V fixed, then V with U fixed
            self.U = self.als_update(self.U, self.V, W, WR, drugMat, self.lambda_l, self.lambda_d)
            self.V = self.als_update(self.V, self.U, W.T, WR.T, targetMat, self.lambda_l, self.lambda_t)
            curr_loss = self.compute_loss(W, intMat, drugMat, targetMat)
            delta_loss = (curr_loss-last_loss)/last_loss
            # print "Epoach:%s, Curr_loss:%s, Delta_loss:%s" % (t+1, curr_loss, delta_loss)
            if abs(delta_loss) < 1e-6:
                break
            last_loss = curr_loss

    def als_update(self, U, V, W, R, S, lambda_l, lambda_d):
        """One ALS half-step: solve the regularised least-squares system
        row by row for the factor matrix U, given the other factor V,
        mask W, masked ratings R and similarity matrix S."""
        X = R.dot(V) + 2*lambda_d*S.dot(U)
        Y = 2*lambda_d*np.dot(U.T, U)
        Z = lambda_d*(np.diag(S)-np.sum(np.square(U), axis=1))
        U0 = np.zeros(U.shape)
        D = np.dot(V.T, V)
        m, n = W.shape
        for i in xrange(m):
            # A = np.dot(V.T, np.diag(W[i, :]))
            # B = A.dot(V) + Y + (lambda_l+Z[i])*self.ones
            # shortcut the V'diag(W_i)V product for the all-zero and
            # all-one mask rows; otherwise restrict V to observed columns
            ii = np.where(W[i, :] > 0)[0]
            if ii.size == 0:
                B = Y + (lambda_l+Z[i])*self.ones
            elif ii.size == n:
                B = D + Y + (lambda_l+Z[i])*self.ones
            else:
                A = np.dot(V[ii, :].T, V[ii, :])
                B = A + Y + (lambda_l+Z[i])*self.ones
            U0[i, :] = X[i, :].dot(np.linalg.inv(B))
        return U0

    def compute_loss(self, W, intMat, drugMat, targetMat):
        """Objective: masked reconstruction error + L2 on the factors +
        similarity-alignment penalties (all squared Frobenius norms)."""
        loss = np.linalg.norm(W * (intMat - np.dot(self.U, self.V.T)), "fro")**(2)
        loss += self.lambda_l*(np.linalg.norm(self.U, "fro")**(2)+np.linalg.norm(self.V, "fro")**(2))
        loss += self.lambda_d*np.linalg.norm(drugMat-self.U.dot(self.U.T), "fro")**(2)+self.lambda_t*np.linalg.norm(targetMat-self.V.dot(self.V.T), "fro")**(2)
        return 0.5*loss

    def evaluation(self, test_data, test_label):
        """Score the test pairs and return (AUPR, AUC, nDCG, inverted
        nDCG). Also stores the raw scores in self.scores (read later by
        train())."""
        ii, jj = test_data[:, 0], test_data[:, 1]
        scores = np.sum(self.U[ii, :]*self.V[jj, :], axis=1)
        self.scores = scores          
        x, y = test_data[:, 0], test_data[:, 1]
        # swapped columns: group by target instead of by drug
        test_data_T = np.column_stack((y,x))
        
        ndcg = normalized_discounted_cummulative_gain(test_data, test_label, np.array(scores))
        ndcg_inv = normalized_discounted_cummulative_gain(test_data_T, test_label, np.array(scores))
        prec, rec, thr = precision_recall_curve(test_label, scores)
        aupr_val = auc(rec, prec)
        fpr, tpr, thr = roc_curve(test_label, scores)
        auc_val = auc(fpr, tpr)
        
        #!!!!we should distinguish here between inverted and not inverted methods nDCGs!!!!
        return aupr_val, auc_val, ndcg, ndcg_inv

    def predict_scores(self, test_data, N):
        """Dot-product scores for the given (drug, target) index pairs.
        `N` is accepted for interface compatibility but not used."""
        inx = np.array(test_data)
        return np.sum(self.U[inx[:, 0], :]*self.V[inx[:, 1], :], axis=1)

    def __str__(self):
        return "Model: CMF, K:%s, lambda_l:%s, lambda_d:%s, lambda_t:%s, max_iter:%s" % (self.K, self.lambda_l, self.lambda_d, self.lambda_t, self.max_iter)

Execute Python Online

import os
import sys
import time
import getopt
import cv_eval
from functions import *
from netlaprls import NetLapRLS
from blmnii import BLMNII
from wnngip import WNNGIP
from cmf import CMF
from brdti import BRDTI

from eval_new_DTI_prediction import *

def main(argv):
    try:
        opts, args = getopt.getopt(argv, "m:d:f:c:s:o:n:p", ["method=", "dataset=", "data-dir=", "cvs=", "specify-arg=", "method-options=", "predict-num=", "output-dir=", ])
    except getopt.GetoptError:
        sys.exit()

    data_dir = 'data'
    output_dir = 'output'
    cvs, sp_arg, model_settings, predict_num = 1, 1, [], 0

    seeds = [7771, 8367, 22, 1812, 4659]
    seedsOptPar = [156]
    # seeds = np.random.choice(10000, 5, replace=False)
    for opt, arg in opts:
        if opt == "--method":
            method = arg
        if opt == "--dataset":
            dataset = arg
        if opt == "--data-dir":
            data_dir = arg
        if opt == "--output-dir":
            output_dir = arg
        if opt == "--cvs":
            cvs = int(arg)
        if opt == "--specify-arg":
            sp_arg = int(arg)
        if opt == "--method-options":
            model_settings = [s.split('=') for s in str(arg).split()]
        if opt == "--predict-num":
            predict_num = int(arg)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    
    if not os.path.isdir(os.path.join(output_dir,"optPar")):
        os.makedirs(os.path.join(output_dir,"optPar"))    
        
    # default parameters for each methods
    if (method == 'brdti') | (method == 'inv_brdti') :
        args = {
            'D':100,
            'learning_rate':0.1,
            'max_iters' : 100,   
            'simple_predict' :False, 
            'bias_regularization':1,                 
            'global_regularization':10**(-2),  
            "cbSim": "knn",
            'cb_alignment_regularization_user' :1,                 
            'cb_alignment_regularization_item' :1}

    if method == 'netlaprls':
        args = {'gamma_d': 10, 'gamma_t': 10, 'beta_d': 1e-5, 'beta_t': 1e-5}
    if method == 'blmnii':
        args = {'alpha': 0.7, 'gamma': 1.0, 'sigma': 1.0, 'avg': False}
    if method == 'wnngip':
        args = {'T': 0.8, 'sigma': 1.0, 'alpha': 0.8}
    if method == 'cmf':
        args = {'K': 100, 'lambda_l': 0.5, 'lambda_d': 0.125, 'lambda_t': 0.125, 'max_iter': 100}
     
    #print(model_settings)    
    for key, val in model_settings:
        args[key] = float(eval(val))

    intMat, drugMat, targetMat = load_data_from_file(dataset, os.path.join(data_dir, 'datasets'))
    drug_names, target_names = get_drugs_targets_names(dataset, os.path.join(data_dir, 'datasets'))
    
    invert = 0    
    if (method == 'inv_brdti')  : 
        invert = 1
        
    if predict_num == 0:
        if cvs == 1:  # CV setting CVS1
            X, D, T, cv = intMat, drugMat, targetMat, 1             
                
        if cvs == 2:  # CV setting CVS2
            X, D, T, cv = intMat, drugMat, targetMat, 0
                
        if cvs == 3:  # CV setting CVS3
            X, D, T, cv = intMat.T, targetMat, drugMat, 0 
        

            
        cv_data = cross_validation(X, seeds, cv, invert)
        cv_data_optimize_params = cross_validation(X, seedsOptPar, cv, invert, num=5)

        
    if sp_arg == 0 and predict_num == 0:
        if (method == 'brdti'):
            cv_eval.brdti_cv_eval(method, dataset,output_dir, cv_data_optimize_params, X, D, T, cvs, args)                             
        if (method == 'inv_brdti'):
            cv_eval.brdti_cv_eval(method, dataset,output_dir, cv_data_optimize_params, X.T, T, D, cvs, args) 
        
        if method == 'netlaprls':
            cv_eval.netlaprls_cv_eval(method, dataset,output_dir, cv_data_optimize_params, X, D, T, cvs, args)
        if method == 'blmnii':
            cv_eval.blmnii_cv_eval(method, dataset,output_dir, cv_data_optimize_params, X, D, T, cvs, args)
        if method == 'wnngip':
            cv_eval.wnngip_cv_eval(method, dataset,output_dir, cv_data_optimize_params, X, D, T, cvs, args)        
        if method == 'cmf':
            cv_eval.cmf_cv_eval(method, dataset,output_dir, cv_data_optimize_params, X, D, T, cvs, args)
    

    if sp_arg == 1 or predict_num > 0:
        tic = time.clock()
        if (method == 'brdti')|(method == 'inv_brdti'):
            model = BRDTI(args)       
        if method == 'netlaprls':
            model = NetLapRLS(gamma_d=args['gamma_d'], gamma_t=args['gamma_t'], beta_d=args['beta_t'], beta_t=args['beta_t'])
        if method == 'blmnii':
            model = BLMNII(alpha=args['alpha'], gamma=args['gamma'], sigma=args['sigma'], avg=args['avg'])
        if method == 'wnngip':
            model = WNNGIP(T=args['T'], sigma=args['sigma'], alpha=args['alpha'])        
        if method == 'cmf':
            model = CMF(K=args['K'], lambda_l=args['lambda_l'], lambda_d=args['lambda_d'], lambda_t=args['lambda_t'], max_iter=args['max_iter'])
        cmd = str(model)
        
        #predict hidden part of the current datasets
        if predict_num == 0:
            print "Dataset:"+dataset+" CVS:"+str(cvs)+"\n"+cmd
            if (method == 'inv_brdti') : 
                aupr_vec, auc_vec, ndcg_inv_vec, ndcg_vec, results = train(model, cv_data, X.T, T, D)
            else:
                aupr_vec, auc_vec, ndcg_vec, ndcg_inv_vec, results = train(model, cv_data, X, D, T)
            aupr_avg, aupr_conf = mean_confidence_interval(aupr_vec)
            auc_avg, auc_conf = mean_confidence_interval(auc_vec)
            ndcg_avg, ndcg_conf = mean_confidence_interval(ndcg_vec)
            ndcg_inv_avg, ndcg_inv_conf = mean_confidence_interval(ndcg_inv_vec)
            
            resfile = os.path.join('output','rawResults', method+"_res_"+str(cvs)+"_"+dataset+".csv")
            outd = open(resfile, "w")
            outd.write(('drug;target;true;predict\n'))
            
            for r in results:
                outd.write('%s;%s;%s;%s\n' % (r[0],r[1],r[2],r[3]) )
            
            print "auc:%.6f, aupr: %.6f, ndcg: %.6f, ndcg_inv: %.6f, auc_conf:%.6f, aupr_conf:%.6f, ndcg_conf:%.6f, ndcg_inv_conf:%.6f, Time:%.6f" % (auc_avg, aupr_avg, ndcg_avg, ndcg_inv_avg, auc_conf, aupr_conf, ndcg_conf, ndcg_inv_conf, time.clock()-tic)
            write_metric_vector_to_file(auc_vec, os.path.join(output_dir, method+"_auc_cvs"+str(cvs)+"_"+dataset+".txt"))
            write_metric_vector_to_file(aupr_vec, os.path.join(output_dir, method+"_aupr_cvs"+str(cvs)+"_"+dataset+".txt"))            
            write_metric_vector_to_file(ndcg_vec, os.path.join(output_dir, method+"_ndcg_cvs"+str(cvs)+"_"+dataset+".txt"))
            write_metric_vector_to_file(ndcg_inv_vec, os.path.join(output_dir, method+"_ndcg_inv_cvs"+str(cvs)+"_"+dataset+".txt"))
        
        #predict novel DTIs    
        elif predict_num > 0:
            print "Dataset:"+dataset+"\n"+cmd
            seed = 376
            if invert: #predicting drugs for targets
                model.fix_model(intMat.T, intMat.T, targetMat, drugMat, seed)
                npa = newDTIPrediction()
                x, y = np.where(intMat == 0)
                scores = model.predict_scores(zip(y, x), 1)
                sz = np.array(zip(x,y,scores))    
                
            else: #predicting targets for drugs
                model.fix_model(intMat, intMat, drugMat, targetMat, seed)
                npa = newDTIPrediction()
                x, y = np.where(intMat == 0)
                scores = model.predict_scores(zip(x, y), 1)
                sz = np.array(zip(x,y,scores))
                
            ndcg_d, ndcg_t, recall_d, recall_t = npa.verify_novel_interactions(method, dataset, sz, predict_num, drug_names, target_names)
            
            st_file= os.path.join('output/newDTI', "_".join([dataset,str(predict_num), "stats.csv"]))
            out = open(st_file, "a")
            out.write(('%s;%f;%f;%f;%f\n' % (method,ndcg_d, ndcg_t, recall_d, recall_t)))

            

if __name__ == "__main__":  
    # NOTE(review): the body below is only a no-op string literal -- the
    # example invocations are effectively commented out, so running this
    # script directly does nothing. Move a main([...]) call out of the
    # string to actually run an experiment.
      
    """  
    main(['--method=blmnii', '--dataset=davis', '--cvs=1', '--specify-arg=1', '--method-opt=alpha=0.6' ])    
    main(['--method=brdti', '--dataset=gpcr', '--cvs=1', '--specify-arg=0'])
    """    

www.waleed.com

# Sum the integers 1..num using a countdown loop (for num = 16 -> 136).
num = 16
if num < 0:
    print("Enter a positive number")
else:
    total = 0  # renamed from `sum` to stop shadowing the built-in sum()
    while num > 0:
        total += num
        num -= 1
    print("The sum is", total)
   
   

title

# Hello World program in Python (Python 2 "print" statement syntax)
    
print "Hello World!\n"

FindZiter

# Hello World program in Python (Python 2 "print" statement syntax)
    
print "Hello World!\n"

Advertisements
Loading...

We use cookies to provide and improve our services. By using our site, you consent to our Cookies Policy.