# Evaluate GGS segmentations on one train/test split. Requires numpy (np), math,
# and the GGS routine from the same module; testSet, origSize, trainData, data,
# breakpoints, lamb, verbose, n, mseList, and trainList are defined in the
# enclosing function.
testSize = len(testSet)
trainSize = origSize - testSize
bp = GGS(trainData.T, breakpoints, lamb, [], verbose)[0]
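# As the loops below assume, each entry of bp is a sorted breakpoint list of the
# form [0, b_1, ..., b_K, trainSize], so len(i) - 2 is the number of interior
# breakpoints of that candidate segmentation.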
for z in bp:
    i = z
    (mse, currBreak) = (0, 1)
    # Fit the first segment's model: empirical mean plus a regularized, biased
    # sample covariance (shrunk toward the identity by lamb / segment length).
    temp = trainData[0:i[1]]
    empMean = np.mean(temp, axis=0)
    empCov = np.cov(temp.T, bias=True) + float(lamb) * np.identity(n) / temp.shape[0]
    invCov = np.linalg.inv(empCov)

    # Calculate test error: average Gaussian log-likelihood of the held-out points.
    for j in range(testSize):
        # Find which break it's in. Subtracting j maps the index in the full data
        # to its position in trainData, since the j earlier test points were
        # removed from the training set.
        adj = testSet[j] - j
        cb = max(sum(1 for k in i if k < adj), 1)
        if currBreak != cb:
            # Entered a new segment, so refit its mean and regularized covariance.
            currBreak = cb
            temp = trainData[i[currBreak - 1]:i[currBreak]]
            empMean = np.mean(temp, axis=0)
            empCov = np.cov(temp.T, bias=True) + float(lamb) * np.identity(n) / temp.shape[0]
            invCov = np.linalg.inv(empCov)
        # Compute the log-likelihood of the test point under the current segment.
        ldet = 0.5 * np.linalg.slogdet(invCov)[1]
        ll = (ldet
              - 0.5 * (data[testSet[j]] - empMean).dot(invCov).dot(data[testSet[j]] - empMean)
              - n * math.log(2 * math.pi) / 2)
        mse = mse + ll
    # Despite its name, mse accumulates log-likelihood; record the per-point average.
    mseList.append((len(i) - 2, mse / testSize))
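    # Each mseList entry pairs the number of interior breakpoints with the average
    # held-out log-likelihood; trainList below collects the matching in-sample
    # averages, so the two curves can be compared when choosing a model size.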
    # Calculate training error: average log-likelihood of trainData under the
    # same per-segment models, sweeping the segments in order.
    tErr = 0
    currBreak = 1
    temp = trainData[0:i[1]]
    empMean = np.mean(temp, axis=0)
    empCov = np.cov(temp.T, bias=True) + float(lamb) * np.identity(n) / temp.shape[0]
    invCov = np.linalg.inv(empCov)
    for j in range(1, trainSize):
        if j in i:
            # Crossed a breakpoint, so refit the model for the next segment.
            currBreak = currBreak + 1
            temp = trainData[i[currBreak - 1]:i[currBreak]]
            empMean = np.mean(temp, axis=0)
            empCov = np.cov(temp.T, bias=True) + float(lamb) * np.identity(n) / temp.shape[0]
            invCov = np.linalg.inv(empCov)
        # Compute the log-likelihood of the training point under its segment.
        ldet = 0.5 * np.linalg.slogdet(invCov)[1]
        ll = (ldet
              - 0.5 * (trainData[j] - empMean).dot(invCov).dot(trainData[j] - empMean)
              - n * math.log(2 * math.pi) / 2)
        tErr = tErr + ll
    trainList.append((len(i) - 2, tErr / trainSize))
return mseList, trainList
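
# For reference, the per-segment likelihood used above can be exercised on its own.
# The sketch below recomputes the same regularized Gaussian log-likelihood for a
# single synthetic segment and one held-out point; the names seg_loglik, segment,
# and test_point are hypothetical and not part of the module above.

import math
import numpy as np

def seg_loglik(segment, x, lamb):
    # Empirical mean and biased sample covariance of the segment, regularized by
    # adding lamb * I / (segment length), exactly as in the routine above.
    n = segment.shape[1]
    empMean = np.mean(segment, axis=0)
    empCov = np.cov(segment.T, bias=True) + float(lamb) * np.identity(n) / segment.shape[0]
    invCov = np.linalg.inv(empCov)
    # Gaussian log-density: 0.5*logdet(invCov) - 0.5*(x-mu)' invCov (x-mu) - (n/2)*log(2*pi)
    ldet = 0.5 * np.linalg.slogdet(invCov)[1]
    diff = x - empMean
    return ldet - 0.5 * diff.dot(invCov).dot(diff) - n * math.log(2 * math.pi) / 2

rng = np.random.default_rng(0)
segment = rng.normal(size=(50, 3))   # 50 observations of a 3-dimensional signal
test_point = rng.normal(size=3)
print(seg_loglik(segment, test_point, lamb=1e-1))

# Summed over the held-out points of a fold and divided by testSize, this quantity
# is what each mseList entry stores; a common way to use the returned lists is to
# pick the breakpoint count whose average held-out log-likelihood is largest.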