    return breakPoints, plotPoints
#Run cross-validation up to Kmax for a set of lambdas
#Return: train and test set likelihood for every K, lambda
def GGSCrossVal(data, Kmax=25, lambList=[0.1, 1, 10], features=[], verbose=False):
    data = data.T
    #Select the desired features
    if (features == []):
        features = list(range(data.shape[1]))
    data = data[:, features]
    origSize, n = data.shape

    #Shuffle the sample indices deterministically to assign them to the 10 folds
    np.random.seed(0)
    ordering = list(range(origSize))
    random.shuffle(ordering)

    trainTestResults = []

    #For each lambda, run the 10 folds in parallel
    numProcesses = min(multiprocessing.cpu_count(), 10)
    pool = multiprocessing.Pool(processes=numProcesses)
    for lamb in lambList:
        mseList = []
        trainList = []
        returnList = pool.map(multi_run_wrapper,
                              [(fold, data, Kmax, lamb, verbose, origSize, n, ordering)
                               for fold in range(10)])

        #Accumulate results
        for i in range(10):
            for j in returnList[i][0]:
                mseList.append(j)
            for j in returnList[i][1]:
                trainList.append(j)

        #Get average of the 10 folds
        plotVals = list(map(list, zip(*mseList)))
        maxBreaks = max(plotVals[0]) + 1
        testAvg = []
        for i in range(maxBreaks):
            num = 0
            runsum = 0
            for j in range(len(plotVals[0])):
                if (plotVals[0][j] == i):
                    runsum = runsum + plotVals[1][j]
                    num = num + 1
            testAvg.append(float(runsum) / num)

        plotVals2 = list(map(list, zip(*trainList)))
        trainAvg = []
        for i in range(maxBreaks):
            num = 0
            runsum = 0
            for j in range(len(plotVals2[0])):
                if (plotVals2[0][j] == i):
                    runsum = runsum + plotVals2[1][j]
                    num = num + 1
            trainAvg.append(float(runsum) / num)

        #Combine results for all lambdas into one list and return that
        trainTestResults.append((lamb, (trainAvg, testAvg)))
    return trainTestResults
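#Illustrative usage sketch (not part of the original library): a hypothetical helper,
#"_exampleCrossValReport", showing one way to call GGSCrossVal on a features-by-time
#matrix and print the averaged fold likelihoods for every (lambda, K) pair
def _exampleCrossValReport(data, Kmax=10, lambList=[0.1, 1, 10]):
    #data is expected in the same orientation as the functions above:
    #one row per feature, one column per time sample
    cvResults = GGSCrossVal(data, Kmax=Kmax, lambList=lambList)
    for lamb, (trainAvg, testAvg) in cvResults:
        for k in range(len(testAvg)):
            print("lambda=%s, K=%d: train=%f, test=%f" % (lamb, k, trainAvg[k], testAvg[k]))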
#Find and return the means/regularized covariance of each segment for a given set of breakpoints
def GGSMeanCov(data, breakpoints, lamb, features=[], verbose=False):
    data = data.T
    #Select the desired features
    if (features == []):
        features = list(range(data.shape[1]))
    data = data[:, features]
    m, n = data.shape

    numSegments = len(breakpoints) - 1
    mean_covs = []
    for i in range(numSegments):
        #Get mean and regularized covariance of current segment
        tempData = data[breakpoints[i]:breakpoints[i+1], :]
        m, n = tempData.shape
        empMean = np.mean(tempData, axis=0)
        empCov = np.cov(tempData.T, bias=True)
        #Shrink the empirical covariance toward the identity, with regularization
        #proportional to lambda and inversely proportional to the segment length
        regularizedCov = empCov + float(lamb) * np.identity(n) / m
        mean_covs.append((empMean, regularizedCov))
    return mean_covs
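#Illustrative usage sketch (not part of the original library): given a breakpoint list
#for a features-by-time matrix (e.g. [0, t1, t2, T] covering T samples), GGSMeanCov
#returns one (mean, regularized covariance) pair per segment.  The helper name
#"_exampleSegmentParams" is an assumption for demonstration only
def _exampleSegmentParams(data, breakpoints, lamb=1.0):
    mean_covs = GGSMeanCov(data, breakpoints, lamb)
    for i, (mu, sigma) in enumerate(mean_covs):
        print("Segment %d: mean shape %s, covariance shape %s" % (i, mu.shape, sigma.shape))
    return mean_covs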