# NOTE: dataset-viewer metadata residue ("text stringlengths 1 93.6k")
# left over from extraction; not part of the original source file.
# Helper functions
def calculateLikelihood(data, breaks, lamb):
    """Return the (negated) regularized log-likelihood objective of
    segmenting `data` at the given breakpoints.

    Parameters
    ----------
    data : (m, n) ndarray -- one observation per row.
    breaks : sequence of int -- segment i covers rows
        [breaks[i], breaks[i+1]); expected to start at 0 and end at len(data).
    lamb : float -- regularization weight; lamb/segment_length * I is added
        to each segment's empirical covariance.

    Returns
    -------
    float -- sum over segments of
        -(m_seg * logdet(S) - lamb * trace(inv(S)))
        where S is the segment's regularized empirical covariance.
    """
    ll = 0
    for start, end in zip(breaks[:-1], breaks[1:]):
        segment = data[start:end, :]
        m, n = segment.shape
        # Build the regularized covariance once and reuse it for both the
        # log-determinant and the trace of its inverse (it was previously
        # recomputed for each term).
        regCov = np.cov(segment.T, bias=True) + float(lamb) * np.identity(n) / m
        ll -= m * np.linalg.slogdet(regCov)[1] - float(lamb) * np.trace(np.linalg.inv(regCov))
    return ll
def addBreak(data, lamb):
    """Find the single split of `data` that most decreases the regularized
    negative log-likelihood objective (same objective as calculateLikelihood).

    Parameters
    ----------
    data : (m, n) ndarray -- one observation per row.
    lamb : float -- regularization weight.

    Returns
    -------
    (ind, delta) -- `ind` is the row index of the best split (left segment is
        data[:ind]), or 0 if no split improves the objective; `delta` is the
        (non-positive) change in objective achieved by splitting there.
    """
    m, n = data.shape
    origMean = np.mean(data, axis=0)
    origCov = np.cov(data.T, bias=True)
    # Objective of the unsplit segment; the regularized covariance is built
    # once and reused for both the log-determinant and the trace term.
    origReg = origCov + float(lamb) * np.identity(n) / m
    origLL = m * np.linalg.slogdet(origReg)[1] - float(lamb) * np.trace(np.linalg.inv(origReg))
    totSum = m * (origCov + np.outer(origMean, origMean))  # sum of x x^T over all rows
    # Running statistics seeded with row 0 in the left segment.
    # BUGFIX: the mean of the single left sample is data[0,:] itself; it was
    # previously divided by the feature count n, which corrupts the running
    # mean recurrence below (at i=2 it needs muLeft == data[0,:]).
    muLeft = data[0, :]
    muRight = (m * origMean - data[0, :]) / (m - 1)
    runSum = np.outer(data[0, :], data[0, :])
    # Scan all candidate split points, keeping the one with the lowest
    # (best) objective; minInd == 0 means "no improving split found".
    minLL = origLL
    minInd = 0
    for i in range(2, m - 1):
        # Move row i-1 into the left segment: left = data[:i], right = data[i:].
        runSum = runSum + np.outer(data[i - 1, :], data[i - 1, :])
        muLeft = ((i - 1) * muLeft + data[i - 1, :]) / i
        muRight = ((m - i + 1) * muRight - data[i - 1, :]) / (m - i)
        sigLeft = runSum / i - np.outer(muLeft, muLeft)
        sigRight = (totSum - runSum) / (m - i) - np.outer(muRight, muRight)
        # Cholesky factors give logdet(S) = 2*sum(log(diag(L))) and, when
        # lamb > 0, trace(inv(S)) = ||inv(L)||_F^2.
        Lleft = np.linalg.cholesky(sigLeft + float(lamb) * np.identity(n) / i)
        Lright = np.linalg.cholesky(sigRight + float(lamb) * np.identity(n) / (m - i))
        llLeft = 2 * sum(map(math.log, np.diag(Lleft)))
        llRight = 2 * sum(map(math.log, np.diag(Lright)))
        (trLeft, trRight) = (0, 0)
        if lamb > 0:
            trLeft = math.pow(np.linalg.norm(np.linalg.inv(Lleft)), 2)
            trRight = math.pow(np.linalg.norm(np.linalg.inv(Lright)), 2)
        LL = i * llLeft - float(lamb) * trLeft + (m - i) * llRight - float(lamb) * trRight
        # Keep track of the best point so far
        if LL < minLL:
            minLL = LL
            minInd = i
    # Return break, increase in LL
    return (minInd, minLL - origLL)
def adjustBreaks(data, breakpoints, newInd, lamb = 0, verbose = False, maxShuffles = 250):
    """Locally refine `breakpoints`: repeatedly re-solve each interior
    breakpoint (via addBreak on the rows between its two neighbors) until a
    full pass moves nothing or maxShuffles passes have run.  Works on a copy,
    so the caller's list is not modified.
    NOTE: Python 2 syntax (print statement; random.shuffle on a range)."""
    bp = breakpoints[:]
    random.seed(0)  # fixed seed -> deterministic visiting order across runs
    #Just one breakpoint, no need to adjust anything
    if (len(bp) == 3):
        return bp
    #Keep track of what breakpoints have changed, so that we don't have to adjust ones which we know are constant
    # Flags are keyed by breakpoint VALUE (row index), not list position.
    lastPass = dict()
    thisPass = dict()
    for b in bp:
        thisPass[b] = 0
    for i in newInd:
        thisPass[i] = 1  # newly added breakpoints start out marked "changed"
    for z in range(maxShuffles):
        lastPass = dict(thisPass)
        thisPass = dict()
        for b in bp:
            thisPass[b] = 0
        switchAny = False
        ordering = range(1,len(bp) - 1)
        random.shuffle(ordering)  # visit interior breakpoints in random order
        for i in ordering:
            #Check if we need to adjust it
            # Only re-solve bp[i] if a neighbor moved in the previous or the
            # current pass; otherwise its subproblem is unchanged.
            if(lastPass[bp[i-1]] == 1 or lastPass[bp[i+1]] == 1 or thisPass[bp[i-1]] == 1 or thisPass[bp[i+1]] == 1):
                tempData = data[bp[i-1]:bp[i+1], :]
                ind, val = addBreak(tempData, lamb)
                # Move only if addBreak found a different, improving split.
                if (bp[i] != ind + bp[i-1] and val != 0):
                    # Re-key the change-flags from the old position to the new.
                    lastPass[ind+bp[i-1]] = lastPass[bp[i]]
                    del lastPass[bp[i]]
                    del thisPass[bp[i]]
                    thisPass[ind+bp[i-1]] = 1
                    if (verbose == True):
                        print "Moving", bp[i], "to", ind+bp[i-1], "length = ", tempData.shape[0], ind
                    bp[i] = ind + bp[i-1]
                    switchAny = True
        # Converged: nothing moved during this full pass.
        if (switchAny == False):
            return bp
    return bp
def multi_run_wrapper(args):
    # Unpack an argument tuple into oneFold.  NOTE(review): presumably used
    # so a single-argument mapper (e.g. a process pool's map) can drive the
    # multi-argument oneFold -- confirm at the call site.
    return oneFold(*args)
def oneFold(fold, data, breakpoints, lamb, verbose, origSize, n, ordering):
# Remove 10% of data for test set
mseList = []
trainList = []
testSet = np.sort(ordering[(fold)*origSize/10:(fold+1)*origSize/10])
mask = np.ones(origSize, dtype=bool)
mask[testSet] = False
trainData = data[mask,:]
# Solve for test and train error