#!/usr/bin/env python

from brainlab import *
import sys, socket, time    # time is used below to build unique brain names
# for poisson:
from numarray.random_array import *
import numarray.convolve as conv
# I believe all random functions are numarray random functions now--random.x() has
# been replaced by x() everywhere, which should use numarray.random_array fxns.
# Importing basic python random package as stdrandom allows use of python's regular
# random fxns too if desired:
import random as stdrandom
#import math

import threading        # for parallel ga evaluation
from scipy import ga

alllock=threading.Lock()
NumComputeNodesToUse=26
mychrom=[]
fignum=1
# shared counters used by the locking helpers and E0Fitness below; initialized here
# so the first locked += doesn't raise NameError (a main section may reset them):
threadnum=0
evalnum=0

# global locking for parallel evaluation in threads by GA

def lock():
    alllock.acquire() 

def unlock():
    alllock.release()

def incthreadnum():
    global alllock
    global threadnum
    alllock.acquire() 
    threadnum+=1
    tn=threadnum
    alllock.release()
    return tn

def decthreadnum():
    global alllock
    global threadnum
    alllock.acquire() 
    threadnum-=1
    tn=threadnum
    alllock.release()
    return tn

def getthreadnum():
    global alllock
    global threadnum
    alllock.acquire() 
    tn=threadnum
    alllock.release()
    return tn       # return the value snapshotted under the lock, not the live global
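
# usage sketch for the helpers above: each GA evaluation thread brackets its
# shared-counter updates with lock()/unlock(), e.g. (as E0Fitness does below):
#   lock(); evalnum+=1; en=evalnum; unlock()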

def RandSpikeList(duration, interspike, starttime=0.0, type=None, pmean=25):
    """ build a list of spike times (seconds) spanning [starttime, starttime+duration)

        type=='poisson':  successive interspike intervals are poisson(pmean) draws
            (numarray.random_array), interpreted as ms
        otherwise:  intervals are uniform on [10, interspike+10) ms
    """
    s=[]
    if type=='poisson': 
        time=starttime
        time+=float(poisson(pmean))/1000.0
        while time < (starttime+duration):
            s.append(time)
            time+=float(poisson(pmean))/1000.0
        return s

    time=starttime
    time+=int((random() * interspike)+10.0)/1000.0
    while time < (starttime+duration):
        s.append(time)
        time+=int((random() * interspike)+10.0)/1000.0
    return s

def SixLayerColSketch():
    """ unused sketch of a full six-layer column (BCol2 below uses a compressed
        four-layer version); kept for reference.  uses the bare LAYER/COLUMN helpers
    """
    L1=LAYER({"TYPE":"L1"})
    L2=LAYER({"TYPE":"L2"})
    L3=LAYER({"TYPE":"L3"})
    L4=LAYER({"TYPE":"L4"})
    L5=LAYER({"TYPE":"L5"})
    L6=LAYER({"TYPE":"L6"})

    c=COLUMN()
    c.AddLayerType(L1)
    c.AddLayerType(L2)
    c.AddLayerType(L3)
    c.AddLayerType(L4)
    c.AddLayerType(L5)
    c.AddLayerType(L6)

    #c.AddConnect(L1, 

    c.AddConnect(L2, L5)

    #c.AddConnect(L2, layer 6 of next 'higher' col)
    #c.AddConnect(L3, layer 6 of next 'higher' col)

    c.AddConnect(L3, L5)

    c.AddConnect(L4, L2)
    c.AddConnect(L4, L3)

    #c.AddConnect(L5, to thalamus)
    c.AddConnect(L5, L6)    # check this

    c.AddConnect(L6, L4)
    c.AddConnect(L6, L1)    # L1 is 'common' layer, all to all connects.  connect to L1 of ALL other cols

    #ThalCol.AddConnect(
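
if 0:
    # minimal demo of RandSpikeList (hypothetical values): a 0.5 s train with
    # poisson interspike intervals of mean pmean=25 ms, and a uniform-ISI train
    # with intervals on [10, 110) ms for comparison
    ptrain=RandSpikeList(0.5, 100, starttime=0.0, type='poisson', pmean=25)
    utrain=RandSpikeList(0.5, 100, starttime=0.0)
    print "poisson spikes:", len(ptrain), "uniform spikes:", len(utrain)
    sys.exit(0)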

def BCol2(newb, name, parms, emsize=10,
        #otherlaysize=10, DoInhib=True, l1emprob=None, eremprob=None, eresprob=None,
        #eri1prob=None, i1esprob=None, esi1prob=None, emesprob=None, esemprob=None, i1emprob=None, emi1prob=None, i1i1prob=None,
        #esesprob=None, ememprob=None,
        synE="E", synI="I", synprobs=None, w=100, h=400):
    """
        this uses inhib cells in their own layer; would be more realistic to
        put them as a different cell group in all/some other layers

        future:
            split out into proper 6 layer col, not compressed 4 layer col
            split out all connection probabilities so that some connections
                can be weakened, or even eliminated, if that works better
    """
    x, y=(0, 0)
    c=newb.COLUMN({"TYPE":name, "_WIDTH":w, "_HEIGHT":h, "_XLOC":x, "_YLOC":y})
    newb.AddColumn(c)

    # get a basic cell named cellE that is a copy of the ES type:
    # can't use just 'E' since it is also a name of standard synapse
    ecell=newb.Copy(newb.celltypes, "ES", "cellE")
    icell=newb.Copy(newb.celltypes, "I1", "cellI")

    l1=newb.LAYER({"TYPE":"layL1", "_UPPER":100, "_LOWER":80})
    c.AddLayerType(l1)
    l1.AddCellType(ecell, parms['otherlaysize']) 

    em=newb.LAYER({"TYPE":"layEM", "_UPPER":75, "_LOWER":55})
    c.AddLayerType(em)
    em.AddCellType(ecell, emsize) 

    er=newb.LAYER({"TYPE":"layER", "_UPPER":50, "_LOWER":30})
    c.AddLayerType(er)
    er.AddCellType(ecell, parms['otherlaysize']) 

    es=newb.LAYER({"TYPE":"layES", "_UPPER":25, "_LOWER":5})
    c.AddLayerType(es)
    es.AddCellType(ecell, parms['otherlaysize']) 

    # hmm, spread these out in the different layers?
    i1=newb.LAYER({"TYPE":"layI", "_UPPER":65, "_LOWER":45})          # common inhib cells for entire layer.
    c.AddLayerType(i1)
    i1.AddCellType(icell, parms['otherlaysize']) 

    bprob=.6        # was .1
    iprob=.6        # was .0134

    se=newb.syntypes[synE]
    si=newb.syntypes[synI]

    # for these connections, leaving the name out of parms (or setting it to None) falls back to bprob:
    for (frm, to, p, defprob) in [(l1, em, 'l1emprob', bprob),
                                  (er, em, 'eremprob', bprob),
                                  (er, es, 'eresprob', bprob),
                                  (em, es, 'emesprob', bprob),
                                  (es, em, 'esemprob', bprob),
                                  (es, es, 'esesprob', bprob),
                                  (em, em, 'ememprob', bprob)]:
        if p not in parms or parms[p] is None:
            c.AddConnect(frm, to, syn=se, prob=defprob)
        else:
            c.AddConnect(frm, to, syn=se, prob=parms[p])

    if parms['DoInhib']:
        print "doing inhib"
        for (frm, to, p, defprob, syn) in [(er, i1, 'eri1prob', bprob, se),
                                           (es, i1, 'esi1prob', bprob, se),
                                           (em, i1, 'emi1prob', bprob, se),
                                           (i1, es, 'i1esprob', iprob, si),
                                           (i1, em, 'i1emprob', iprob, si),
                                           (i1, i1, 'i1i1prob', iprob, si)]:
            if p not in parms or parms[p] is None:
                c.AddConnect(frm, to, syn=syn, prob=defprob)
            else:
                c.AddConnect(frm, to, syn=syn, prob=parms[p])

    # these are newer connections; we don't make them at all (even with a default
    # prob) if they are None in parms or absent from parms:
    if parms.get('L1L5prob') is not None:
        c.AddConnect(l1, es, syn=se, prob=parms['L1L5prob'])
    if parms.get('L6L4prob') is not None:
        c.AddConnect(es, er, syn=se, prob=parms['L6L4prob'])

    return c
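
if 0:
    # usage sketch for BCol2 (hypothetical values; a real call appears in E0 below):
    # only 'otherlaysize' and 'DoInhib' are required in parms, plus any of the
    # per-connection probability names listed in the loops above
    demob=BRAIN({"TYPE": "AREA-BRAIN", "JOB": "bcol2demo"})
    demoparms={'otherlaysize': 10, 'DoInhib': True, 'L1L5prob': None, 'L6L4prob': None}
    democol=BCol2(demob, "democol", demoparms, emsize=5)
    sys.exit(0)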

def pprint(l):
    print "[",
    for j in l:
        print "%.3f" % float(j), 
    print "]",

def E0ProcessData(brainname, numdatastims, stiminterval, repname, datastimpat, keystimpat, dosave=True, ncells=10, doshow=True):
    oo=[]
    ts=0.0
    if 1:
        """
         it is workable to call LoadSpikeData on each time range,
         but for a long report file it is costly to rescan the file repeatedly
         looking for different time ranges
            another option would be to issue a separate report request for each time range . . .
        """
        if 0:   # dead branch; note stimdur is not a parameter of this function and would need to be passed in
            while ts < numdatastims*stiminterval:
                print "loading for time", ts
                oo.append(LoadSpikeData(brainname, repname, starttime=ts, endtime=ts+stimdur))
                ts+=stiminterval
            for i in range(0, ncells):
                for o in oo: 
                    pprint(o[i]); print
                print
                #SpikePatternPlot(o[i])
        elif datastimpat is not None or keystimpat is not None:     # new method that looks at the stimpat lists
            oo=LoadSpikeData(brainname, repname)
            ts=0.0
            numdpats=max(datastimpat)+1
            numkpats=max(keystimpat)+1
            if numdpats != numkpats:
                print "number of key and data stim patterns should agree", numdpats, numkpats
                sys.exit(0)
            windo=0
            ewind=[]
            for w in range(0, numdpats):  ewind.append(0)
            startfig=getfignum()
            addfignum(ncells)
            for dp, kp in zip(datastimpat, keystimpat):     # NOTE: was "for kp, dp in map(None, ...)", which swapped key and data and mislabeled the key-only/data-only windows
                te=ts+stiminterval
                print "time", ts, "to", te, "key, data", kp, dp
                # keep one plot for each cell, showing each pattern in a subplot on that plot
                # the test inputs where only data or only key is present are shown in a
                # different color on the subplot where they would ideally belong if
                # memorization were occurring; each pattern number is on its own subplot, and
                # when the key and data input patterns differ, the subplot is the one input that is present
                if kp==dp:      # training input
                    #print "key and data same", kp
                    if kp >=0:
                        windo=ewind[kp]; ewind[kp]+=1
                        #print "incd", kp
                        for c in range(0, ncells):
                            figure(startfig+c, figsize=(8,6))
                            ax=subplot(numdpats, 1, kp+1)
                            fs=oo[c]
                            sp=[s-ts for s in fs if s >= ts and s < te]
                            if len(sp) > 0:
                                scatter(sp, len(sp)*[windo], c='r', s=16)
                            else:
                                print "skipped empty list for", windo
                elif kp >=0 and dp < 0:
                    #print "key, but no data"
                    windo=ewind[kp]; ewind[kp]+=1
                    #print "incd", kp
                    for c in range(0, ncells):
                        figure(startfig+c)
                        ax=subplot(numdpats, 1, kp+1)
                        fs=oo[c]
                        sp=[s-ts for s in fs if s >= ts and s < te]
                        #print sp
                        if len(sp) > 0:
                            scatter(sp, len(sp)*[windo], c='g', s=32, marker="d")
                        else:
                            print "skipped empty list for", windo
                elif kp < 0 and dp >=0:
                    #print "data, but no key"
                    windo=ewind[dp]; ewind[dp]+=1
                    #print "incd", dp
                    for c in range(0, ncells):
                        figure(startfig+c)
                        ax=subplot(numdpats, 1, dp+1)
                        fs=oo[c]
                        sp=[s-ts for s in fs if s >= ts and s < te]
                        #print sp
                        if len(sp) > 0:
                            scatter(sp, len(sp)*[windo], c='b', s=32, marker="s")
                        else:
                            print "skipped empty list for", windo
                windo+=1
                ts=te
            # set axes
            for c in range(0, ncells):
                figure(startfig+c)
                for p in range(0, numdpats):
                    subplot(numdpats, 1, p+1)
                    title("Brain: "+brainname+" "+repname+" Cell "+`c`+" Stim pattern "+`p+1`, fontsize='small')
                    if p < numdpats-1:
                        set(gca(), 'xticklabels', [])
                    xlim(0.0, stiminterval)
                    #ax.set_ylim([0, len(datastimpat)*stiminterval])
                    #ax.set_ylim([0, ewind[p]])
                    ylim(-1, ewind[p]+1)
                    grid(True)
            if dosave!=False:
                for cell in range(0, ncells):
                    figure(startfig+cell)
                    n=brainname+"_"+repname+"fig"+`cell+1`
                    #savefig(n)
                    if dosave=="ps":
                        #savefig(n, orientation="landscape")
                        #savefig(n+".ps", orientation="landscape")
                        savefig(n+".ps")
                    else:
                        savefig(n, dpi=150)
                    print "saved plot", n
        else:   # old method that assumed pattern was strictly repetitive
            print "Old method of specifying stim patterns no longer supported."
            sys.exit(0)
        return

def E0PrepStim(newb, datain, datastimpat, keyin, keystimpat, stimdur, stiminterval, type=None, interspike=100):
    """create the stim sequences that will be applied repeatedly
    """

    # datain[]:     the list of cell names for the data input area
    # datastimpat:  the list for the entire run of which pattern to apply for each stimdur of stiminterval
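    # e.g. (hypothetical) datastimpat=[0, 1, -1, 0] means: pattern 0 in the first
    # stiminterval, pattern 1 in the second, no data stim in the third, pattern 0
    # again in the fourth; below, datainstim[cellnum][patnum] holds pattern
    # patnum's zero-based spike list for cell cellnum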

    numdstim=max(datastimpat)+1       # how many *unique* patterns are there?  (named 0 through this numdstim-1)
    #print "numdstim", numdstim
    datainstim=[]       # will contain a stim list for each cell datainstim[cellnum][patnum].  all zero time based
    for i in range(0, len(datain)):  # for each cell in area
        tc=[]
        datainstim.append(tc) 
        for j in range(0, numdstim):  # for each unique pattern there is 
            sn=RandSpikeList(stimdur, interspike, starttime=0.0, type=type)
            #print >> sys.stderr, "datastim", i, "has", len(sn), "spikes"
            tc.append(sn)

    numkstim=max(keystimpat)+1
    #print "numkstim", numkstim
    keyinstim=[]        # will contain a stim list for each cell
    for i in range(0, len(keyin)):
        tc=[]
        keyinstim.append(tc)
        for j in range(0, numkstim):  # for each unique pattern there is
            sn=RandSpikeList(stimdur, interspike, starttime=0.0, type=type)
            #print >> sys.stderr, "keystim", i, "has", len(sn), "spikes"
            tc.append(sn)
    
    # now apply the patterns repeatedly.
    if 1:
        stimstart=0.0
        pn=0
        for p in datastimpat:        # for each repetition of stim
            if p >= 0:              # pattern of -1 means don't apply stim here
                for i in range(0, len(datain)):      # for each cell in the input area, apply pattern p
                    n=datain[i]
                    s=datainstim[i][p]
                    ns=[v+stimstart for v in s]     # shift each spike by stimstart
                    #print "applying pattern", p
                    newb.AddSpikeTrainPulseStim("stimdatain"+`i`+"_"+`pn`, n, n+"_1CELL", "ER", ns)
                    pn+=1
            stimstart+=stiminterval

    if 1:
        stimstart=0.0
        pn=0
        for p in keystimpat:         # for each repetition of stim
            if p >= 0:              # pattern of -1 means don't apply stim here
                for i in range(0, len(keyin)):       # for each cell in the input area
                    n=keyin[i]
                    s=keyinstim[i][p]
                    ns=[v+stimstart for v in s]     # shift each spike by stimstart
                    #print "applying pattern", p
                    newb.AddSpikeTrainPulseStim("stimkeyin"+`i`+"_"+`pn`, n, n+"_1CELL", "ER", ns)
                    pn+=1
            stimstart+=stiminterval

    if 0:
        # to see what the stim looks like
        for stimnum in range(0, len(datainstim)):
            d=datainstim[stimnum]       # the per-pattern spike lists for this cell
            figure(100)
            for patnum in range(0, len(d)):
                celldata=d[patnum]
                if len(celldata) > 0:
                    scatter(celldata, len(celldata)*[patnum])
            # only doing first stimnum for now
            show()
            print "exiting after display in E0PrepStim"
            sys.exit(0)

def E0PrepStimDistinct(newb, datain, datastimpat, keyin, keystimpat, stimdur, stiminterval, type=None, interspike=100):
    """ create the stim sequences that will be applied repeatedly
        new version that tries to make sure stims are very distinct
        only one of the stim patterns of each type (key, data) will have a spike at a given time
    """

    # datain[]:     the list of cell names for the data input area
    # datastimpat:  the list for the entire run of which pattern to apply for each stimdur of stiminterval

    numdstim=max(datastimpat)+1       # how many *unique* patterns are there?  (named 0 through this numdstim-1)
    #print "numdstim", numdstim
    datainstim=[]       # will contain a stim list for each cell datainstim[cellnum][patnum].  all zero time based
    for i in range(0, len(datain)):  # for each cell in area
        tc=[]
        datainstim.append(tc) 
        for j in range(0, numdstim):  # for each unique pattern there is 
            sn=RandSpikeList(stimdur, interspike, starttime=0.0, type=type)
            #print >> sys.stderr, "datastim", i, "has", len(sn), "spikes"
            tc.append(sn)

    numkstim=max(keystimpat)+1
    #print "numkstim", numkstim
    keyinstim=[]        # will contain a stim list for each cell
    for i in range(0, len(keyin)):
        tc=[]
        keyinstim.append(tc)
        for j in range(0, numkstim):  # for each unique pattern there is
            sn=RandSpikeList(stimdur, interspike, starttime=0.0, type=type)
            #print >> sys.stderr, "keystim", i, "has", len(sn), "spikes"
            tc.append(sn)
    
    # now apply the patterns repeatedly.
    if 1:
        stimstart=0.0
        pn=0
        for p in datastimpat:        # for each repetition of stim
            if p >= 0:              # pattern of -1 means don't apply stim here
                for i in range(0, len(datain)):      # for each cell in the input area, apply pattern p
                    n=datain[i]
                    s=datainstim[i][p]
                    ns=[v+stimstart for v in s]     # shift each spike by stimstart
                    #print "applying pattern", p
                    newb.AddSpikeTrainPulseStim("stimdatain"+`i`+"_"+`pn`, n, n+"_1CELL", "ER", ns)
                    pn+=1
            stimstart+=stiminterval

    if 1:
        stimstart=0.0
        pn=0
        for p in keystimpat:         # for each repetition of stim
            if p >= 0:              # pattern of -1 means don't apply stim here
                for i in range(0, len(keyin)):       # for each cell in the input area
                    n=keyin[i]
                    s=keyinstim[i][p]
                    ns=[v+stimstart for v in s]     # shift each spike by stimstart
                    #print "applying pattern", p
                    newb.AddSpikeTrainPulseStim("stimkeyin"+`i`+"_"+`pn`, n, n+"_1CELL", "ER", ns)
                    pn+=1
            stimstart+=stiminterval

    if 0:
        # to see what the stim looks like
        # plot each cell's per-pattern spike lists, cell number on the y axis
        c=0
        for d in datainstim:        # d is the list of per-pattern spike lists for cell c
            figure(100)
            for celldata in d:
                if len(celldata) > 0:
                    scatter(celldata, len(celldata)*[c])
            c+=1
        show()
        sys.exit(0)

    if 0:
        # to see what the stim looks like, for research proposal

        figure()
        ns=len(datainstim)
        fr=(1.0-.1)/ns
        frs=.7*fr   # height of each set of axes
        for stimnum in range(0, ns):
            print "stimnum", stimnum
            if stimnum==0:
                #axo=subplot(ns+1, 1, stimnum+1)
                axo=axes([.1, (ns-stimnum)*fr - fr +.1, .8, frs])   # the +.1 leaves space for x label on bottom
                ax=axo
            else:
                axo=axes([.1, (ns-stimnum)*fr - fr +.1, .8, frs], sharex=ax)
                #axo=subplot(ns+1, 1, stimnum+1, sharex=ax)
            if stimnum!=ns-1:
                set(axo.get_xticklabels(), visible=False)
            else:
                axo.set_xlabel("Time (sec)")
            d=datainstim[stimnum]
            ncells=len(d)
            for cellnum in range(0, ncells):
                print "cell", cellnum
                celldata=d[cellnum]
                if len(celldata) > 0:
                    scatter(celldata, len(celldata)*[cellnum])
            # only doing first stimnum for now
            print "showing!"
            # put extra space at top and bot of y axis so spikes there are not cut off:
            # e.g. ncells is 4, numbered 0,1,2,3 want axes -1 to 4
            axo.set_ylim((-1, ncells))
            # remove the top and bot label on y axis 
            # figure out the pythonic way to do this later:
            #labels=axo.get_yticklabels()
            #print "labels are", labels
            #axo.set_yticklabels(labels)    # we want the tick marks, but not the labels
            #locs, labels=yticks()
            #print "labels are", labels
            #print "locs are", locs
            labels=["%s" % yv for yv in range(-1, ncells+1)]    # yv avoids shadowing d above
            # why can't get labels via yticks?
            labels[0]=""; labels[-1]=""
            #print ncells, labels; sys.exit(0)
            axo.set_yticklabels(labels)
            #yticks(locs, labels)
            #print labels
            axo.set_ylabel("Cell #")
            axo.set_title("Stimulus pattern %d" %(stimnum))
        #show()
        savefig("t.ps")
        print "exiting after display in E0PrepStim"
        sys.exit(0)

def E0CheckResults(brainname, repname, stimdur, stiminterval, keystimpat, datastimpat, prfstimpat, norm=False, dosave=False):
    """ construct binned arrays with 1 at spikes 
        stimdur is actualy len of applied stimulusl
        stiminterval is time between beginnings of successive stim applications (will be >= stimdur)

        for now, assume there is only one match template tagged in prfstimpat for each pattern number;
    """
    # since spikes are 2.5 ms wide, setting a bintime of .0025 should result in at most
    # one spike per template bin; in reality, some bins get two spikes.  why is that?
    # probably because spikes are exactly 2.5 ms apart.  setting bintime to .0020 does
    # in fact result in at most one spike per template bin
    #bintime=.0025         # size of bin, in seconds
    bintime=.0020         # size of bin, in seconds
    #bintime=.01           # size of bin, in seconds

    # for now, we will match only the time actually receiving stim.  maybe better to shift it a bit
    # since output lags input by a couple of synapse times
    #matchinterval=stimdur
    matchinterval=stimdur+.02
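
    # worked example with the defaults above: stimdur=.5 -> matchinterval=.52 s,
    # bintime=.002 s -> nbins=int(.52/.002)=260; a spike .013 s into the match
    # window lands in bin int(.013/.002)=6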

    oo=LoadSpikeData(brainname, repname)
    numpats=max(keystimpat)+1 
    starttime=0.0
    mp=[]       # 'match patterns' for a pattern number
    endtime=starttime+stiminterval
    nbins=int(matchinterval/bintime)
    numcells=len(oo)
    print "bins per match template: "+`nbins`+", each is", bintime, "s, for a match interval of", matchinterval, "s, #cells",numcells
    print "numpats",numpats
    for i in range(0, numpats):
        ar=zeros((numcells, nbins))
        mp.append(ar)

    if 0:
        mt0=mp[0]      # select pattern 0
        print mt0[1]   # select cell 1 in that pattern
        sys.exit(0)

    # now construct the template based on the part of the stimpattern for each stim
    # remember that each template really is a number of cells wide and we match on all of them (maybe)
    # the M match template will usually be the last of the training runs for each pattern pair number
    ts=0.0
    te=stiminterval
    for (t, n) in prfstimpat:
        if t=='M':
            print "found a match template for pattern", n, "at time",ts,"duration",matchinterval
            celln=0
            for cellsp in oo: 
                matchend=ts+matchinterval
                a=[s-ts for s in cellsp if s >= ts and s < matchend]       # matchinterval, since only looking at part of stiminterval
                # now we have a list of spike times in the output for that cell, increment the appropriate bin: 
                ar=mp[n]        # later, check for multiple match patterns.  average them?
                for spiketime in a:
                    # the old int(spiketime/bintime)-1 wrapped spikes in the first bin
                    # to the last bin via a negative index; clamp to the valid range instead:
                    binnum=min(int(spiketime/bintime), nbins-1)
                    #print "time", spiketime, "in bin", binnum
                    ar[celln,binnum]+=1
                celln+=1
        ts=te
        te=ts+stiminterval

    # at this point, mp[n] contains the match pattern (what is tagged in prfstimpat as 'M') for
    # pattern n.  This is independent of the order that each stim pattern might appear in the
    # actual prfstimpat.  mp[n][c] is the array for cell #c

    if 0:
        for p in range(0, numpats):
            #print "\n",p, mp[p]
            print "\n",p, mp[p][0]
        sys.exit(0)

    if dosave!=False:
        nextfigure()
        for p in range(0, numpats):
            #subplot(numpats, 1, p+1)
            ax=mysubplot(numpats, 1, p+1)
            #x,y=mp[p].shape
            #print x,y      # 4 260
            # convert bins to a scatter-plot-able list of x points where >=1
            for cn in range(0, numcells):
                cp=mp[p][cn]
                pa=nonzero(cp)
                l=len(pa)
                if l > 0:
                    scatter(pa, [cn]*l)
            # the y axis scaling in imshow makes it unpleasant to work with
            # USE interpolation='nearest'
            #imshow(mp[p], aspect='preserve')
            #imshow(mp[p], interpolation=None)
            #l=len(c0)
            #if l>0:
            #    hist([p]*l, 
            ylim(-1, numcells)
            labels=["%s" %d for d in range(-1, numcells+1)]
            labels[0]=""; labels[-1]=""
            ax.set_yticklabels(labels)
            xlim(-.5, nbins-.5)
            if (p+1)!=numpats:
                set(ax.get_xticklabels(), visible=False)
            title("Brain: "+brainname+" "+repname+" Templates for stim pattern "+`p+1`, fontsize='small')

        n=brainname+repname+"templates"
        if dosave=="ps":
            #savefig(n, orientation="landscape")
            #savefig(n+".ps", orientation="landscape")
            savefig(n+".ps")
        else:
            savefig(n, dpi=150)
        #show()

    # now match the test patterns with the templates
    ts=0.0
    te=stiminterval
    correct={}
    correct['BK']=0; correct['BD']=0; correct['AK']=0; correct['AD']=0
    numposs=0

    # make a plot of the patterns we are to match
    # one figure for each pattern number, with each AK, AD, BK, BD for that pattern in subplots
    # create the figures in advance, so we can add the subplots to the appropriate figure
    # when we run across that input (they can appear in random order)
    if dosave!=False:
        oplots={}
        osubplotn={}
        for i in range(0, numpats):
            oplots[i]=nextfigure()
            osubplotn[i]=1

        # assume there are same number of BK as AK as AD as BD == neachtype
        neachtype=0
        for (t, n) in prfstimpat:
            if t=='BK':
                neachtype+=1

    for (t, n) in prfstimpat:       # t is template type (BK is before training with key as input, AD is after training with data as input)
        if t=='BK':
            numposs+=1
        if t in ('BK', 'BD', 'AK', 'AD'):
            tar=zeros((numcells, nbins))
            #print "got test template for stim pattern", n
            celln=0
            for cellsp in oo: 
                matchend=ts+matchinterval
                a=[s-ts for s in cellsp if s >= ts and s < matchend]       # matchinterval, since only looking at part of stiminterval
                # now we have a list of spike times in the output for that cell, increment the appropriate bin: 
                for spiketime in a:
                    binnum=min(int(spiketime/bintime), nbins-1)     # clamped, as in the template construction above
                    #print "time", spiketime, "in bin", binnum
                    tar[celln,binnum]+=1
                celln+=1
            #print "matching test pattern:", tar
            # now we have the spike outputs for this cell in ar

            # find which of our targets is the closest match to this ar pattern
            # compute cross correlations for each cell for each match pattern stored previously in mar:
            ss=[]
            binwin=2        # extra bins on either side of target bin to check for spikes to count as hit
            for p in range(0, numpats):
                #print "pattern", p
                s=0.0
                mar=mp[p]
                for c in range(0, numcells):
                    if 0:
                        # use a standard correlation function
                        # this slides the sequences along each other and finds the maximum 'overlap'
                        co=conv.correlate(tar[c], mar[c])
                        mm=float(max(co))
                        if norm:
                            #print "norm, mm", mm, "sum(co)", sum(co)
                            sc=sum(co)     # why was I dividing by sum of co?  actually, I think it does make sense
                            print "FIXME:  not sure if normalizing by sum(co) makes sense.  Think about this."
                            if sc > 0:
                                mm/=sum(co)       # coeff option to matlab xcorr divides by variance?  no.
                            else:
                                mm=0.0
                            #print "after norm mm", mm
                    else:
                        # a customized version that might give credit to neighboring bins
                        # but doesn't slide them along each other looking for greatest match alignment
                        # so might be less forgiving of
                        # systematic misalignments while also being more forgiving of local errors
                        mm=0.0
                        # mar is the match profile from last training run
                        # tar holds the binned results of the run we are trying to match to a profile
                        # NOTE:  inner variables renamed (p->b, ts->tcs); the old names shadowed
                        # the pattern index and the stim window start time in enclosing scopes
                        ms=mar[c]; tcs=tar[c]
                        lm=len(ms)      # better be same as len(tcs)!
                        unmatched=0
                        for b in range(0, lm):      # for each bin
                            # (an earlier version did this backwards: it looked for spikes in the
                            # match profile and checked whether each appeared in what we are
                            # trying to match.  this should be a better way:)
                            if tcs[b] > 0:
                                # spike in sequence we are trying to match to mp profile
                                # so check for spike in match profile within +- binwin 
                                lp=b-binwin; rp=b+binwin
                                if lp < 0: lp=0
                                if rp >= lm: rp=lm-1
                                gotmatch=False
                                for tp in range(lp, rp+1):  # rp+1 so the window really is +- binwin inclusive
                                    if ms[tp]>=1:
                                        mm+=1.0
                                        gotmatch=True
                                        break   # only get credit for matching this spike once!
                                if not gotmatch: unmatched+=1
                        if norm:
                            #den=sum(tcs)
                            den=sum(tcs)+sum(ms)    # divide by number of spikes in BOTH, to reward having just a few spikes that all line up
                            if den > 0:
                                print "norm:", mm, den, unmatched,
                                mm/=float(den)  # penalize targets having crazy numbers of spikes?
                                print "after norm mm is", mm
                            else:
                                mm=0.0
                    s+=mm
                # s is the sum of all cells' max correlations
                ss.append(s)
                #print s
            # ss is the list of (sums of cells' correlations) indexed by pattern number
            sm=max(ss)
            si=ss.index(sm)
            print "this template",n,"is most like pattern",si,"with a match sum of", sm
            if n==si:
                ans="+ "+`n`+"=="+`si`
                correct[t]+=1
            else: ans="  "+`n`+"!="+`si`
            print t, ans

            if dosave!=False:
                figure(oplots[n])       # select the plot that holds all for pattern #n
                nsp=osubplotn[n]
                print "plot", nsp, "out of", (neachtype*4)/numpats
                #ax=subplot((neachtype*4)/numpats, 1, nsp)            # *4 since we have BK, AK, AD, AK on each plot, /numpats since each pattern has own figure
                ax=mysubplot((neachtype*4)/numpats, 1, nsp)            # *4 since we have BK, AK, AD, AK on each plot, /numpats since each pattern has own figure
                osubplotn[n]=nsp+1
                for cn in range(0, numcells):
                    cp=tar[cn]
                    pa=nonzero(cp)
                    l=len(pa)
                    if l > 0:
                        scatter(pa, [cn]*l)
                # overlay smaller red dots for the match profile template that matched best
                mar=mp[si]      # the pattern we matched most closely
                for cn in range(0, numcells):
                    cp=mar[cn]
                    pa=nonzero(cp)
                    l=len(pa)
                    if l > 0:
                        scatter(pa, [cn]*l, s=4, c='r')

                ylim(-1, numcells)
                labels=["%s" %d for d in range(-1, numcells+1)]
                labels[0]=""; labels[-1]=""
                ax.set_yticklabels(labels)
                xlim(-.5, nbins-.5)
                # get rid of x label on all but bottom subplot
                if nsp!=(neachtype*4)/numpats:      # was nsp!=numpats, which only hid the right labels when the counts happened to coincide
                    set(ax.get_xticklabels(), visible=False)
                #title("Pattern %d Stim %s" %(n+1, t))
                #text(.8, .5, "Matches %d with %f" %(si+1, sm), transform = ax.transAxes)
                title("Pattern %d Stim %s, matches %d with %f" %(n+1, t, si+1, sm), fontsize='small')
            
        ts=te
        te=ts+stiminterval

    b='BD'; a='AD'; print b, correct[b], "  ", a, correct[a] 
    b='BK'; a='AK'; print b, correct[b], "  ", a, correct[a] 

    # save all the match attempt plots
    if dosave!=False:
        for i in range(0, numpats):
            f=oplots[i]
            figure(f)
            n=brainname+repname+"matchresults"+`i+1`
            if dosave=="ps":
                #savefig(n, orientation="landscape")
                #savefig(n+".ps", orientation="landscape")
                savefig(n+".ps")
            else:
                savefig(n, dpi=150)
        
    # here we assume numpossible correct is same for all cases
    return (correct['BD'], correct['AD'], correct['BK'], correct['AK'], numposs)
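
if 0:
    # toy, pure-python sketch of the +-binwin neighborhood match used in
    # E0CheckResults above (hypothetical data): count test spikes that have a
    # template spike within binwin bins
    def binmatch(testbins, templbins, binwin=2):
        lm=len(templbins)
        mm=0
        for b in range(0, lm):
            if testbins[b] > 0:
                lp=max(b-binwin, 0); rp=min(b+binwin, lm-1)
                for tp in range(lp, rp+1):
                    if templbins[tp] >= 1:
                        mm+=1
                        break   # each test spike can match at most once
        return mm
    print binmatch([0,1,0,0,1,0], [1,0,0,0,0,1])    # both test spikes match -> 2
    sys.exit(0)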

def mysubplot(nrows, ncols, axnum):
    """ like subplot, but leaves more space for plot titles
        only tested for ncols==1

        ax starts numbering at 1, which will be placed at the top of the plot
    """
    if ncols!=1:
        print "mysubplot may only work for ncols==1!"
    fr=(1.0-.1)/nrows
    frs=.7*fr   # height of each set of axes
    axo=axes([.1, (nrows-axnum)*fr +.1, .8, frs])   # the +.1 leaves space for x label on bottom
    return axo
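
# worked example for mysubplot: with nrows=4, axnum=1 (top row):
#   fr=(1.0-.1)/4=.225, axes height=.7*.225=.1575, bottom edge=(4-1)*.225+.1=.775
# the remaining .3*fr of each row is the gap that holds the subplot title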

def StimShuffle(s, randomorder):
    if randomorder:
        n=s[:]      # shuffle a copy; plain n=s would alias s and disturb it
        stdrandom.shuffle(n)
        return n
    else:
        return s

def E0MakeStimPatterns(trainseq, nstimpat, randomorder=True):
    # FIXME:  should make training and test order random
    N=-1
    if 1:
        konly1, donly1, trainpats, konly2, donly2=trainseq
        """
        prfstimpat is the pattern of responses to look at when trying to match an answer
        it tells the results analyzer how to interpret the results, look for pattern matches, etc.
        want to be able to shuffle stim patterns, obviously must preserve correlation between keystim, datastim
        and prfstimpat.  maybe should just build prfstimpat and then construct keystimpat and datastimpat from that?
        """
        keystimpat=[]; datastimpat=[]; prfstimpat=[]
        #stim=range(0, nstimpat)
        #nostim=[N]*nstimpat
        trainpstim=zip(['T']*nstimpat, range(0, nstimpat))
        mstim =zip(['M']*nstimpat, range(0, nstimpat))    # m for match
        BKstim=zip(['BK']*nstimpat, range(0, nstimpat))   # before training, K only 
        BDstim=zip(['BD']*nstimpat, range(0, nstimpat))   # before training, D only 
        AKstim=zip(['AK']*nstimpat, range(0, nstimpat))   # after training, K only 
        ADstim=zip(['AD']*nstimpat, range(0, nstimpat))   # after training, D only 
        for i in range(0,konly1):
            prfstimpat+=StimShuffle(BKstim, randomorder)
        for i in range(0,donly1):
            prfstimpat+=StimShuffle(BDstim, randomorder)
        for i in range(0, trainpats-1):
            prfstimpat+=StimShuffle(trainpstim, randomorder)
        # final training sequence will be 'match' template sequence
        if trainpats >= 1:
            prfstimpat+=StimShuffle(mstim, randomorder)
        else:
            print "Error:  trainpats < 1"
            sys.exit(0)
        for i in range(0,konly2):
            prfstimpat+=StimShuffle(AKstim, randomorder)
        for i in range(0,donly2):
            prfstimpat+=StimShuffle(ADstim, randomorder)
        # build keystim and datastim from prfstim
        for (p, d) in prfstimpat:
            if p=='BK' or p=='AK':
                keystimpat.append(d)
                datastimpat.append(-1)
            elif p=='BD' or p=='AD':
                keystimpat.append(-1)
                datastimpat.append(d)
            elif p=='T' or p=='M':
                keystimpat.append(d)
                datastimpat.append(d)
        if 0:
            print "truncating stim patterns for test run"
            prfstimpat=prfstimpat[0:64]; datastimpat=datastimpat[0:64]; keystimpat=keystimpat[0:64]
        if 1:
            print "keystimpat:", keystimpat
            print "datastimpat:", datastimpat
            print "prfstimpat:", prfstimpat
            #sys.exit(0)
        if len(prfstimpat) != len(keystimpat) or len(keystimpat) != len(datastimpat):
            print "patterns should all be same len"
            sys.exit(0)
    return (datastimpat, keystimpat, prfstimpat)
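
if 0:
    # usage sketch for E0MakeStimPatterns (hypothetical values): trainseq=(1,1,2,1,1)
    # with nstimpat=2 gives one BK block, one BD block, one T block, one M block,
    # one AK block and one AD block of 2 patterns each (12 stim slots total);
    # randomorder=False keeps each block in pattern order 0,1
    (dpat, kpat, ppat)=E0MakeStimPatterns((1, 1, 2, 1, 1), 2, randomorder=False)
    sys.exit(0)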

def E0Fitness(self):
    global evalnum  # since we can't easily get our generation # or individual #
    global threadnum

    lock(); threadnum+=1; tn=threadnum; unlock()
    print "RPD:  E0Fitness in thread", tn
    sys.stdout.flush()

    score=0.0
    actual=self.get_values()
    allbd=0; allad=0; allbk=0; allak=0; anumposs=0
    # to get a better picture of how good a parameter set is, run it this many separate times before evaluating fitness
    numruns=2

    for rn in range(0, numruns):      # numruns separate runs per individual, combined into one fitness below
        # FIXME:  note that when running parallel this will not be deterministic.  change to use gen*something
        # of course even threadnum is not deterministic, it depends on who starts up fastest and checks first.
        # need to make this deterministic
        lock(); evalnum+=1; en=evalnum; unlock()
        brainname="E0_"+`en`

        # get the actual settings for this individual for all the elements in the chrom and stuff them
        # into the brain parms, so the brain we instantiate will use our evolved values!
        parms={}
        pn=0
        global mychrom
        for parmname in mychrom:
            parms[parmname]=actual[pn]
            pn+=1
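
        # e.g. (hypothetical) mychrom=['allF', 'allD'] with actual=[.08, .12]
        # yields parms={'allF': .08, 'allD': .12}, which E0 below then lays over
        # its defaultparms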

        if 0:
            # This is good to sanity check:  by removing hebbian learning, the thing should not work at all!
            print "overriding poshebb and neghebb to 0!"
            # might be better to remove them from genome
            parms['poshebbdeltause']=0.0
            parms['neghebbdeltause']=0.0

        print "working on brainname "+brainname
        ro=True
        #randseed=en*numruns+rn        no reason to be this complicated
        randseed=en
        nprocs=1
        success=False
        while not success:
            try:
                print "starting thread %d run %d, randseed %d, brainname %s" %(tn, rn, randseed, brainname)
                (bd, ad, bk, ak, numposs)=E0(brainname=brainname, randseed=randseed, randomorder=ro, dorun=True, nprocs=nprocs, doprofilecheck=True, parms=parms, procnum=(tn%NumComputeNodesToUse))
                success=True
            except Exception:   # keep KeyboardInterrupt killable; note this retries forever if the run keeps failing
                print "thread %d run %d got exception on run, will retry!" %(tn, rn)

        print "results for this trial, threadnum %d rn %d: %d %d %d %d %d" %(tn, rn, bd, ad, bk, ak, numposs)
        sys.stdout.flush()
        allbd+=bd; allad+=ad; allbk+=bk; allak+=ak; anumposs+=numposs
        
    print "overall results for all trials with these params:  allbd, allad, allbk, allak", allbd, allad, allbk, allak, anumposs
    sys.stdout.flush()
    # create the overall fitness by combining results from all runs of this individual:
    anumposs=float(anumposs)
    # good fitness is defined as improvement in results of data only and key only runs, after training compared to before training
    #fitness=float(allad-allbd)/anumposs + float(allak-allbk)/anumposs

    fitness=float(allad + allak)/anumposs

    print "tn %d with these brain parms:" % tn, parms
    print "tn %d got combined fitness: %.4f" % (tn, fitness)
    sys.stdout.flush()

    # I think negative fitness is OK for scipy GA, it is normalized
    #if fitness < 0.0:
    #    print "fitness <0.0 changed to 0.0"
    #    fitness=0.0

    return fitness

def E0(brainname=None, randseed=23423, dorun=True, doprofilecheck=False, doplot=False, fullreports=False, nprocs=8, randomorder=True, douseplot=False, parms={}, dosaveplots=False, doshowplots=False, procnum=None):
    """
        First attempt at spike sequence memorization with single column.
        Sanity checks:
            what happens when DoInhib is twiddled?  Do we get more spikes with no inhib?
                Odd.  When DoInhib=False, get very little spiking activity!  why?
                This with maxcond=.02 for all syns . . . 
            disable key input, leave data input on.  Does EM output vary?
                yes.  however each cell in EM (output layer) fires *more* with both on
                    so no interesting interactions, it appears . . . increase Inhib?  
            disable data input, leave key on.  Does EM output vary? 
            increase iprob in BLayer.  spiking should decrease
                when increased from .2 to .4, spiking did decrease in most layers (but not in ER)
        Look for reproducibility or training convergence:
            apply same key and data sequence n times in a row, look for EM output to converge to same
            "name" pattern
        Interesting:  reducing stimdur from 1.0 to .5 causes almost all spiking activity to stop! (only
            1 spike in all areas with .5).  Why?
        Interesting:  with Hebb turned off (0Hebb), but with facil/depress still enabled, repetitive
            stimulus input leads to quickly converging output!  Good.  The initial variation is
            probably due to facil/depression, which quickly settles in
                
        Experiment:  put in 16 paired training sequences, then just data, then just key
        Result:  the (green) spikes during key-only appeared exactly where they did during key and data training
                 there were no (blue) spikes during data only!  so either:  1) key is *always* irrelevant to
                 what appears on output, or 2) after training key *became* irrelevant to what appears on output 
                key is being ignored after training (and unfortunately, possibly before training too!)
                to test which of 1 or 2 is happening, I will do:

        Experiment:  same as above but with another key-only and data-only sequence at the beginning, before
                any pair training, to see what is up.
        Result:  never any blue spikes (data only).  key only inputs before training give very close but not
                exactly same spikes as
                after training with k+d (but might give same results as training with just k--could test that but
                they are quite close even before training.)
                conclusions:  data inputs aren't doing anything really.

        Experiment:  repeat above with stronger data input connections from ER->EM (data input to output read area)
            eremprob was 'same' which was bprob which was .6, now 1.0
        Result:  still no blue spikes (data only)
        
        Experiment:  disable inhib to try to get blue spikes
                    no blue spikes in ER under these conditions.  but now there are some in EM!  (Was I looking at ER or EM before?  probably
                        just ER)
                    ah, it turns out *ER* wasn't even getting any blue spikes--that is the input area for data!
                    but there were some blue spikes in EM under these conditions!!  I wonder if there 
                    since it wasn't spiking, that is possibly bad.  maxcond needs to be higher for data->ER and key->L1 area?
                    hmm, maybe not, since I don't want data or key to strictly *force* a spike in ER or L1.  I want
                    there to have to be other coincident activity . . . 

        Experiment: rerun above with inhib turned back on and see if we get activity in *EM*
        Results:    ???

        Experiment: changing task a bit, from symmetrical key/data recall to just seeing if k+d trained->o will still
            give k->o, and k!->o before training 

        Experiment:  poisson seems to give much less variable results among all cells and patterns

        Experiment:  put back to nonpoisson random, looks OK (like it did before)
                still no apparent learning during training

        Experiment:  setting Ftau and Dtau to very low values (.00001) results in spastic spiking
                (see save/b_padastra_1103041169_EMRepfig1.png; png doesn't show all spikes)
                why is this different than simply turning F and D off entirely by selecting
                a None synfd profile?  Actually, a None synfd profile crashed old NCS and caused an
                "insane number of spikes" on more recently compiled NCS . . . 
                allF=.00001; allD=.00001; maxcond=.02
        
        Experiment:  increasing D (only) by 100x, see if spike flurry dies down
                allF=.00001; allFsd=allF/2.0; allD=.001; allDsd=allD/2.0; maxcond=.02
        Results:  still crazy spiking
                DOC:  it would be good to have a concise, accurate statement of sim equations used by NCS
                QUESTION:  is the F parameter alone still screwing things up?

        Experiment:  increasing D (only) another 10x, see if spike flurry dies down
                allF=.00001; allFsd=allF/2.0; allD=.01; allDsd=allD/2.0; maxcond=.02
                still crazy spiking

        Experiment:  increasing D (only) another 10x, see if spike flurry dies down
                allF=.00001; allFsd=allF/2.0; allD=.1; allDsd=allD/2.0; maxcond=.02
                this starts to show the banding patterns, and repeated runs are similar
                limited learning though
                allF=.1; allFsd=allF/2.0; allD=.1; allDsd=allD/2.0; maxcond=.02

        Experiment:  increasing F up from last exp. by 10x
                do see some shifting of blue spikes.  spike pattern is pretty constant tho.

        Looking back at earlier results shows that there *were* ER spikes, just a few, and
                EM didn't seem to do much training then either.  

        things to look for in plots:
            is there learning--do patterns change during k+d input training?
            is there any ER spiking (directly from data input)?
            is d only, and k only, at outset different than d only, and k only at end?  (if not, no learning by assoc. has happened)
            do all/most cells share same activation pattern indicating a gross minicolumn "on" state?

        question:  look at activation cell by cell, or column wide?
                could do experiment to see if cells in column differ in activation or if they
                are all the same . . . 
                with current parameters, it does look like all cells have same activation pattern

        idea:  with maxcond set so high, learning parameters don't have a chance to come into play,
            they are overwhelmed
        idea:  stop facil and depression, see if hebbian is enough to get assoc. learning
        idea:  spikes are too far apart for any association to happen . . . do we need
            some sort of higher frequency 'clock'?

        IDEA:  what kind of memory task makes sense?
            0)  "symmetric":  present K, D separately.  show don't get same EM output O.
                train with K+D, record EM.  Then present K, get same O.  Then present
                D, get same O.  K and D have become associated with same output.
            1)  Present K only, repeatedly, demonstrate that don't get convergent EM output
                Then present K+D, get some EM output O.  Then present just K again and get O.
            2)  "simple single":  present K repeatedly to train network, converge to output O,
                then present beginning of K, get self-triggering full O output sequence
            3)  "associative":  present K+D get O; then present just D, get O still.
                BUT:  this is not convincing because perhaps you could have just presented
                D alone and gotten same O?  could first demonstrate that D alone would have
                given different O?

        THINGS TO TRY:
            3 synapse profiles, per latest PFC data

        Some things to look at:
            is there spiking in all layers?  if not, then  clearly something is superfluous
            is there a lot of residual, resonant spiking in stiminterval after the stim application, or does it
                    pretty much stop after stimdur?
            do key and data input reports look like they should, with clean input patterns?
                    these reports show the separate col input areas so should be very clean
            is there any/reasonable amount of ER activity?
            is there thalamic activity?

        * Still getting very little ER activity.  This is bad because it is where Data connects!
            the only input into ER is from Data input
        * interestingly, very little L1 activity for data only inputs
            that makes sense given the connectivity of the network 
        * EM is the only layer that seems to have activity for both key only and data only inputs
            also, generally the residual activity (after actual stim input) seems to subside after K+D training
        * potential objection:  could be doing simple coincidence detection:  output spikes
            when both inputs spikes.  Response:  no, final test output is done when only
            one input is present, and the output is different than it was when only one input
            was presented initially; so, *learning* must be taking place!

        going to try reducing inhib and EREMprob, increasing prob to ER to get something going on
            in ER

    """

    if brainname is None:
        # create a unique brain name for a new run:
        brainname="b_"+socket.gethostname()+"_"+`int(time.time())`

    if doplot or fullreports: gather=True
    else: gather=False
        
    gatherkeydatareports=gather
    gatherUSEreports=gather
    gatherthalreport=gather
    gatherotherlayerreports=gather
    
    threesynprofiles=True

    """ each stim application is stimdur long
        each stim application will *begin* stiminterval apart
        there will thus be stiminterval-stimdur empty space between stims (though *response* may linger)
        thus the entire stim sequence will repeat according to pattern defined in trainseq 
    """
    #### ALL EXPERIMENTAL PARAMETERS (should be) DEFINED IN THIS BLOCK
    # any parameters set in parms coming in will be preserved, otherwise these defaults will be set:
    if threesynprofiles:
        DFs=(("CL1", .85583, .28090, .01897, .04140, .24, .14, 3.42, 2.64),
             ("CL2", .20482, .10234, .16226, .15119, .30, .11, 3.41, 3.38),
             ("CL3", .29235, .23446, .71033, .22051, .26, .14, 3.52, 2.79))
        synprobs=(("syn-CL1", .25), ("syn-CL2", .44), ("syn-CL3", 1.0))
    else:
        synprobs=None
    #initA=(3.55, 2.97)      # MAXCOND, effectively, should have 3 clusters of this since there are no stddev in .in file for MAXCOND
    # ER is data input target and wasn't spiking much.  thal connect to ER should help:
    # should check on biological data on this
    connectthaltoER=False; thaltoERprob=.5
    defaultparms=  {'stimdur':          0.5,            # the actual stim sequences generated are this long
                    'stiminterval':     1.0,            # make this longer to have a rest after stimdur.  should be >=stimdur
                    'allmaxcond':       0.019,
                    'ntapecells':       5,
                    'otherlaysize':     10,             # size of L1, ER, ES, I  
                    'thalsize':         10,
                    'eresprob':         None,
                    'eremprob':         1.0,
                    'l1emprob':         None,
                    'datatoERprob':     1.0,
                    'keytoL1prob':      1.0,
                    'esthalprob':       0.5,
                    'thaltoL1prob':     0.5,
                    'L1L5prob':         None,       # none for backward compatibility, but probably desirable
                    'L6L4prob':         None,       # none for backward compatibility, but probably desirable
                    'DoInhib':          True,
                    'trainseq':         (2, 2, 2, 2, 2),   # keyonly, dataonly, both, keyonly, dataonly,
                    'nstimpat':         8,
                    'interspike':       50,
                    'poshebbdeltause':  .15,
                    'neghebbdeltause':  .15,
                    'initRSE':          (1.0, 0.0),     # 1.0=start fully facilitated
                    'initUSE':          (.26, .13),     # the window within which short term facil and depress operate, adjusted by Hebbian.  (applied to E only?)
                    'allF': .1,
                    'allD': .1}
    #### END OF ALL EXPERIMENTAL PARAMETERS
    
    # make sure the parms caller is trying to set are really valid:
    for p in parms:
        if p not in defaultparms:
            print "Trying to override parameter", p, "which is not in default parameter set."
            sys.exit(0)

    # set all the parms to defaults unless caller overrode them:
    for p in defaultparms:
        if p not in parms:
            parms[p]=defaultparms[p]
        else:   # the parm was already set, passed in by caller
            print "default for parm "+p+" overrode with value ", parms[p]

    # some experimental variables are derived from others:
    w=parms['ntapecells']
    datainsize=w
    keyinsize=w
    """ em is output area size, doesn't really need to be same as key or data width since we are
      only comparing output templates in em to other output templates in em """
    emsize=w
    allFsd=parms['allF']/2.0
    allDsd=parms['allD']/2.0

    #print "experimental parms:", parms

    stdrandom.seed(randseed)
    seed(randseed, (randseed*7)+1)
    (datastimpat, keystimpat, prfstimpat)=E0MakeStimPatterns(parms['trainseq'], parms['nstimpat'], randomorder)

    endsim=len(keystimpat)*parms['stiminterval']

    #print " key stim sequence:", keystimpat, "len", len(keystimpat)
    #print "data stim sequence:", datastimpat, "len", len(datastimpat)

    if len(datastimpat)*parms['stiminterval'] > endsim:
        print "there are", len(datastimpat),"stim applications of length", stiminterval,"seconds each but sim is only", endsim
        sys.exit(0)

    print "brainname is "+brainname
    stdrandom.seed(randseed)
    seed(randseed, (randseed*7)+1)
    maxcond=parms['allmaxcond']
    newb=BRAIN({"TYPE": "AREA-BRAIN", "FSV": "10000.00", "JOB": brainname,
                "SEED": "-"+`randseed`, "DURATION": `endsim`, "OUTPUT_CELLS": "YES",
                "OUTPUT_CONNECT_MAP": "YES", "DISTANCE": "YES", "_MAXCOND": "%.6f"%maxcond})

    # with key input blocked, and inhib disabled:
    #   maxcond of .01 results in no spikes, .02 results in some in all layers (ER is weak)
    # don't need to do this anymore:
    #chantypes, spks, comptypes, celltypes, spsgs, sfds, sls, syntypes, lays = newb.StandardStuff(maxcond=parms['allmaxcond'])
    syntypes=newb.syntypes      # or =newb.libs['standard'].syntypes
    sls=newb.sls
    sfds=newb.sfds
    celltypes=newb.celltypes

    if 0:
        # disable facilitation/depression and/or Hebbian learning for all synapses, as a test:
        #print syntypes
        #print sfds
        for s in syntypes.keys():
            #pass
            #syntypes[s].parms["LEARN_LABEL"]=sls["0Hebb"]
            syntypes[s].parms["LEARN_LABEL"]=sls["+Hebb"]
            #syntypes[s].parms["SFD_LABEL"]=sfds["F0"]       # crashes old compile NCS, causes insane number of spikes on new NCS
        #sys.exit(0)
        print "cleared Hebbian and facilitation/depression for all synapse types"

    if parms['neghebbdeltause'] is not None:
        bhebb=sls["BHebb"]
        bhebb.parms["NEG_HEB_PEAK_DELTA_USE"]="%.4f %.4f" %(parms['neghebbdeltause'], 0.0)

    if parms['poshebbdeltause'] is not None:
        bhebb=sls["BHebb"]
        bhebb.parms["POS_HEB_PEAK_DELTA_USE"]="%.4f %.4f" %(parms['poshebbdeltause'], 0.0)

    # turning off channels currently results in wacky results.  examine the reports sometime and see why
    if 'usechans' in parms:
        if parms['usechans']==False:
            # comptypes was not fetched from newb above; assuming newb exposes
            # it the same way as syntypes/sls/sfds/celltypes
            comptypes=newb.comptypes
            soma=comptypes['SOMA1']     # renamed from 'cmp', which shadows the builtin
            soma.parms["CHANNEL"]={}

    if 0:
        print "changing F, D params in F2 synfd profile"
        f2=sfds["F2"]
        f2.parms["FACIL_TAU"]="%.6f %.6f" % (.2836, .2925)
        f2.parms["DEPR_TAU"]="%.6f %.6f" % (.4032, .3634)
        #print f2

    if 1:
        # NOTE:  can't actually set tau depr to 0.  Get load error from NCS (this was fixed in later version of NCS)
        print "setting F, D params in F2 synfd profile"
        f2=sfds["F2"]
        f2.parms["FACIL_TAU"]="%.6f %.6f" % (parms['allF'], allFsd) 
        f2.parms["DEPR_TAU"]="%.6f %.6f" %  (parms['allD'], allDsd)
        #print f2

    if 1:
        print "changing U, A params in E synapse"
        cs=syntypes["E"]
        cs.parms["RSE_INIT"]="%.4f %.6f" % parms['initRSE']
        cs.parms["ABSOLUTE_USE"]="%.4f %.6f" % parms['initUSE']       # "U"
        #print cs

    if 1:
        print "changing Hebbian learning to BOTH for E and I"
        cs=syntypes["E"]
        cs.parms["LEARN_LABEL"]=sls["BHebb"]
        cs=syntypes["I"]
        cs.parms["LEARN_LABEL"]=sls["BHebb"]
        #print cs
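        # (BHebb adjusts synapses in both directions; contrast the "+Hebb"-only
        # and "0Hebb" labels used in the disabled test block above)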

    if threesynprofiles:
        for (nm, D, Dsd, F, Fsd, U, Usd, A, Asd) in DFs:
            newsyn=newb.Copy(syntypes, "E", "syn-"+nm)
            newsfdname="sfd-"+nm
            newsyn.parms["SFD_LABEL"]=newsfdname
            newsyn.parms["RSE_INIT"]="%.4f %.4f"% (1.0, 0.0)
            newsyn.parms["MAX_CONDUCT"]=A/10.0
            newsyn.parms["ABSOLUTE_USE"]="%.4f %.4f"% (U, Usd)
            # skip Asd (MAX_CONDUCT has no SD; we could simulate this ourselves by creating many syntypes but won't do that now)
            newsfd=newb.Copy(sfds, "F1", newsfdname)
            newsfd.parms["SFD"]="BOTH"
            newsfd.parms["FACIL_TAU"]="%.4f %.4f"% (F, Fsd)
            newsfd.parms["DEPR_TAU"]="%.4f %.4f"% (D, Dsd)
    #print syntypes
    #print sfds
    #sys.exit(0)

    # standard library doesn't contain a cell type called EM, so we'll make one based on ES:
    EM=newb.Copy(celltypes, "ES", "EM")
    L1=newb.Copy(celltypes, "ES", "L1")

    # default inhib and excite synapse types used for all connections
    snE="E"; stE=syntypes[snE]
    snI="I"; stI=syntypes[snI]

    cn="col"
    #bc=BCol2(newb, cn, emsize=emsize, otherlaysize=parms['otherlaysize'], DoInhib=parms['DoInhib'], eremprob=parms['eremprob'], synE=snE, synI=snI, synprobs=synprobs)
    bc=BCol2(newb, cn, parms, emsize=emsize, synE=snE, synI=snI, synprobs=synprobs)

    # NOTE:  data in area, key in area, thalamus area are all implemented as groups of single-cell columns
    # NOTE:  somewhat confusing that the cellgroup is named ER in all these single-cell columns (see the thalamus note below)
    # NOTE:  also somewhat confusing in that the EM, ER, ES, L1 layers are not implemented as separate NCS layers here, but instead cellgroups in a layer
    datain=[]       # keep a list of the names
    y=-100
    for i in range(0, datainsize):
        n="datain"+`i`
        c=newb.Standard1CellColumn(n, loc=(10, 10, -300, y))
        datain.append(n)
        newb.AddConnect(c, (bc, "layER"), stE, parms['datatoERprob'], 10.0)
        y+=10

    keyin=[]        # keep a list of the names
    y=100
    for i in range(0, keyinsize):
        n="keyin"+`i`
        c=newb.Standard1CellColumn(n, loc=(5, 5, -300, y))
        keyin.append(n)
        newb.AddConnect(c, (bc, "layL1"), stE, parms['keytoL1prob'], 10.0)
        y+=10

    thal=[]
    y=-50
    for i in range(0, parms['thalsize']):
        n="thal"+`i`
        c=newb.Standard1CellColumn(n, loc=(5, 5, 300, y))
        thal.append(n)
        # NOTE:  the ER in the thalamus has nothing to do with the ER layer of the column proper.  ER is just the
        # name of the cell group in the thalamus
        newb.AddConnect(c, (bc, "layL1"), stE, parms['thaltoL1prob'], 10.0)
        newb.AddConnect((bc, "layL1"), c, stE, parms['esthalprob'], 10.0)
        if connectthaltoER:
            newb.AddConnect(c, (bc, "layER"), stE, parms['thaltoERprob'], 10.0)
        y+=10
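
    # The three loops above repeat one pattern: a row of single-cell columns
    # wired into a layer of the column.  A hedged refactoring sketch of the
    # common case (the thalamus row would still need its extra connects):
    def MakeCellRow(prefix, count, wh, x, ystart, targetlay, prob, ystep=10):
        names=[]
        y=ystart
        for i in range(0, count):
            n=prefix+`i`
            c=newb.Standard1CellColumn(n, loc=(wh, wh, x, y))
            names.append(n)
            newb.AddConnect(c, (bc, targetlay), stE, prob, 10.0)
            y+=ystep
        return names
    # e.g. datain=MakeCellRow("datain", datainsize, 10, -300, -100, "layER", parms['datatoERprob'])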

    """
    comptypes["SOMA1"].DelChannel("ahp-2")
    print comptypes["SOMA1"]
    """

    #stimtype='poisson'
    stimtype=None       # renamed from 'type' to avoid shadowing the builtin
    stdrandom.seed(randseed)
    seed(randseed, (randseed*7)+1)
    #E0PrepStim(newb, datain, datastimpat, keyin, keystimpat, parms['stimdur'], parms['stiminterval'], type=stimtype, interspike=parms['interspike'])
    E0PrepStimDistinct(newb, datain, datastimpat, keyin, keystimpat, parms['stimdur'], parms['stiminterval'], type=stimtype, interspike=parms['interspike'])

    d=(0.0, endsim)

    if gatherkeydatareports:
        # report on key and data for a subset of cells
        # since this area is enumerated completely (separate column for each cell) we need to issue one report per cell
        for k in [0, 1]:        # only gather these reports for first couple of cells
            newb.AddSimpleReport("Key"+`k`+"Rep", keyin[k], "v", dur=d)
        for k in [0, 1]:
            newb.AddSimpleReport("Data"+`k`+"Rep", datain[k], "v", dur=d)
    
    if gatherthalreport:    
        for k in range(0, parms['thalsize']):
            newb.AddSimpleReport("Thal"+`k`+"Rep", thal[k], "v", dur=d)

    if gatherUSEreports:
        # how do these end up with so many columns when EM is only a few cells wide?
        # ah yes, we are looking at *synapses*, duh
        newb.AddSimpleReport("EMRSERep", (bc, "layEM"), "r", freq=1000, dur=d, synname=snE)
        newb.AddSimpleReport("EMUSERep", (bc, "layEM"), "a", freq=1000, dur=d, synname=snE)

    # EM is the "output" area
    newb.AddSimpleReport("EMRep", (bc, "layEM"), "v", dur=d)

    if gatherotherlayerreports:
        newb.AddSimpleReport("L1Rep", (bc, "layL1"), "v", dur=d)
        newb.AddSimpleReport("ERRep", (bc, "layER"), "v", dur=d)
        newb.AddSimpleReport("ESRep", (bc, "layES"), "v", dur=d)

    if dorun:
        stdrandom.seed(randseed)
        seed(randseed, (randseed*7)+1)
        newb.Run(verbose=True, nprocs=nprocs, procnum=procnum)

    print "brain "+brainname+" done."

    #newb.Plot3D()

    #def E0Results(brainname, doprofilecheck=False, douseplot=False, doplot=False):
    
    if douseplot:
        ds=None
        print "USE and RSE reports"
        ReportPlot(brainname, "EMUSERep", plottype="scatter", plottitle="Change in EM Layer USE", xlab="Timestep", ylab="USE", dosave=ds)
        ReportPlot(brainname, "EMRSERep", plottype="scatter", plottitle="Change in EM Layer RSE", xlab="Timestep", ylab="RSE", dosave=ds)

    if doplot:
        # for processing existing data:
        #brainname="b_padastra_1101954171"       # with k only at start
        #brainname="b_padastra_1101956018"       # no k only start, in case that skews the training 
        #brainname="b_padastra_1101960874"       # first with poisson trains
        #brainname="b_padastra_1101960874"       # back to non-poisson as a test 
        #brainname="b_padastra_1103396909"        # No D only input.  meeting #1 
        #brainname="b_padastra_1103755398"        # with K only, D only input.  meeting #2   ONLY +HEBB!
        #brainname="b_padastra_1103767803"        # with K only, D only input.  postmeeting #1   Both HEBB!
        #brainname="b_padastra_1104207380"        # changed K, D, Thal connection probs from 1 to .5
        #brainname="b_padastra_1104291340"       # first sort-of evidence of pattern learning
        #brainname="b_padastra_1105931234"
        #brainname="b_padastra_1105934136"       # reduced maxcond from .02 to .012 from b_padastra_1105931234
        # plot reports that we have already collected

        ns=len(datastimpat)
        nc=2  # too much data to show all cells most of the time, so just show a couple
        if 1: E0ProcessData(brainname, "ignored", parms['stiminterval'], "EMRep", datastimpat, keystimpat, ncells=nc, dosave=dosaveplots)  # OUTPUT layer

        if 1: E0ProcessData(brainname, "ignored", parms['stiminterval'], "ESRep", datastimpat, keystimpat, ncells=nc, dosave=dosaveplots)  # EM,ER->ES->Thal
        if 1: E0ProcessData(brainname, "ignored", parms['stiminterval'], "ERRep", datastimpat, keystimpat, ncells=nc, dosave=dosaveplots)  # Data applied here
        if 1: E0ProcessData(brainname, "ignored", parms['stiminterval'], "L1Rep", datastimpat, keystimpat, ncells=nc, dosave=dosaveplots)  # the "buss", Key applied here
        # these are enumerated columns, so each column only has one cell.  also, we don't request reports on each column.
        # cell number is encoded into report name.
        if 1: E0ProcessData(brainname, "ignored", parms['stiminterval'], "Key0Rep", datastimpat, keystimpat, ncells=1, dosave=dosaveplots)
        if 1: E0ProcessData(brainname, "ignored", parms['stiminterval'], "Data0Rep", datastimpat, keystimpat, ncells=1, dosave=dosaveplots)
        if 1: E0ProcessData(brainname, "ignored", parms['stiminterval'], "Thal0Rep", datastimpat, keystimpat, ncells=1, dosave=dosaveplots)
        #show()

    if doprofilecheck:
        # look for pattern matches
        norm=True
        res=E0CheckResults(brainname, "EMRep", parms['stimdur'], parms['stiminterval'], keystimpat, datastimpat, prfstimpat, norm=True, dosave=dosaveplots)
        return res

    return (1, 1, 1, 1, 1)      # placeholder with the same shape as the (bd, ad, bk, ak, numposs) result tuple

def Retest(parms, nruns=10, startn=None, endn=None, nprocs=1, dorun=True):
    # might be good to parallelize this.  at least we do run on some number of processors

    if startn is None:
        startn=400

    if endn is None:
        endn=startn+nruns
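    # e.g. Retest(parms, nruns=10) runs seeds 400..409, while
    # Retest(parms, startn=462, nruns=1) runs just seed 462 (as in __main__ below)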

    allbd=0; allad=0; allbk=0; allak=0; anumposs=0
    for randseed in range(startn, endn):
        ro=True
        brainname="E0"+`randseed`
        if 0:
            (bd, ad, bk, ak, numposs)=E0(brainname=brainname, randseed=randseed, randomorder=ro, dorun=False, doprofilecheck=False, doplot=True)
            print bd, ad, bk, ak, numposs
            allbd+=bd; allad+=ad; allbk+=bk; allak+=ak; anumposs+=numposs
        elif 1: # run and check results in one step
            fr=True
            #dorun=True
            (bd, ad, bk, ak, numposs)=E0(brainname=brainname, randseed=randseed, randomorder=ro, dorun=dorun, nprocs=nprocs, fullreports=fr, doprofilecheck=True, doplot=False, dosaveplots="ps", parms=parms)
            print bd, ad, bk, ak, numposs
            sys.stdout.flush()
            allbd+=bd; allad+=ad; allbk+=bk; allak+=ak; anumposs+=numposs                
        else:
            E0(brainname=brainname, randseed=randseed, randomorder=ro, dorun=False, doplot=True)
            show()

    print "allbd, allad, allbk, allak", allbd, allad, allbk, allak, anumposs
    # good fitness is defined as improvement 
    #anumposs=float(anumposs)
    #fitness=float(allad-allbd)/anumposs + float(allak-allbk)/anumposs
    # just define fitness as absolute fraction of post-training answer correct, normalized by number of runs.  so, 0-1.0
    # logically, there should be no way the fraction of pre-training correct could be above chance.  if it is,
    # we need to figure out why!
    fitness=float(allad + allak)/float(anumposs) 
    print "overall fitness: %.4f" % (fitness)

def LJDemoPlot():
    # demo plot for LJ and thesis 
    f=nextfigure()
    np=3; pn=1

    subplot(np, 1, pn); pn+=1
    ts=150000       # start at 15 sec (150000 timesteps at FSV 10000)
    dd=100000       # go for 10 more sec
    ReportPlot("E0332", "EMUSERep", newfigure=False, cols=[75, 80, 85], linelab=['synapse 75', 'synapse 80', 'synapse 85'], xlab="", xrange=(ts, ts+dd), legendloc='center right', ylab='USE')
    # dup of above style SpikePatternPlot("E0332", ["L1Rep"], newfigure=False, ylab='L1 cell number', xrange=(0, 4))
    locs, labels=xticks(); xticks(locs, [""]*len(locs))

    subplot(np, 1, pn); pn+=1
    # this must come from another model since EM doesn't have that many cells anymore . . . 
    SpikePatternPlot("E0332", ["EMRep"], newfigure=False, xlab='', ylab='EM cell number', xrange=(15, 25))
    locs, labels=xticks(); xticks(locs, [""]*len(locs))
    legend(('Each dot is a spike', ), loc='center right')
    #legend()

    # this looks OK, not great, but doesn't contribute much new info and squishes other plots in size:
    #subplot(np, 1, pn); pn+=1
    #ReportSpikePlot("E0332", ["EMRep"], newfigure=False, xrange=(ts, ts+dd))

    subplot(np, 1, pn); pn+=1
    ReportPlot('E0332', ['EMRep'], newfigure=False, cols=[5], linelab=['EM cell 5'], xrange=(ts, ts+dd), legendloc='center right')

    savefig("LJfig1.ps"); print "saved to .ps"
    #show()

#class my_genome_evaluator(ga.genome.default_evaluator):
#    """ just like default except that it takes i, which gives the number
#        of this individual in popluation (useful for parallel execution
#        to map to node number perhaps) and also passes that along to the
#        genome evaluator
#    """
#    def evaluate(self, genome, i):
#        print "in my genome evaluator", i
#        return genome.performance()

#class my_genome_evaluator:
#    def evaluate(self,genome, n):
#        """ n is number of individual in population we are evaluating"""
#        return genome.performance(n) #if a performance() method is available, use it!

class this_genome(ga.genome.list_genome):
    """ same as base with new evaluate that passes individual # to evaluator

        list genome doesn't provide its own evaluator.  it inherits
        the base genome evaluator.  however it does seem to invoke evaluate
        in subclasses (?) so we can't override evaluate with an addl arg
        without changing those . . . sigh 
    """
    #def evaluate(self, force=0, ind=0):
    #    """ based on the one in genome.py """
    #    print "in this_genome evaluate()", ind  
    #    if (not self.evaluated) or force:
    #        self._score = self.evaluator.evaluate(self, ind)
    #        self.evaluated = 1
    #        self.evals = self.evals + 1
    #    else:
    #        print "already evaluated this individual"
    #    return self._score
    pass

import time     # time.time()/time.sleep() are used below; imported explicitly in case brainlab does not re-export it

class my_pop_evaluator(ga.population.default_pop_evaluator):
    def evaluate(self, pop, force=0, mode='parallel'):
        print "in RPD parallel pop evaluator"
        # RPD: new parallel
        if mode=='parallel':
            print "doing parallel evaluation"
            # invoke each evaluate in a separate thread
            thl=[]
            # semaphore lock evals variable?  (is it even used?) other data too?
            # FIXME:  check that assumption
            # hopefully, genomes only manipulate their internal structures
            # and population level structures are only changed after all
            # evaluation threads are complete
            evals=0
            i=0
            # somewhat confusingly, len(pop) is not usually the popsize
            # we requested.  the pop also contains previously evaluated members,
            # some of which may be part of the current pop and (I think) some not.
            # if I request popsize of 16, len(pop) after first generation is
            # 28 and there are a variable number of new evaluations each generation
            # (say 10, or 12).  Here we will invoke a thread on each individual in
            # pop, but many will return almost immediately the already-calculated
            # fitness value.
            print "population has",len(pop),"members"
            global threadnum
            threadnum=0
            i=0
            for ind in pop:
                #thist=threading.Thread(target=ind.evaluate, args=(force))
                thist=threading.Thread(target=ind.evaluate, args=(force, )) # force?  obsolete?
                thl.append(thist)
                thist.start()
                #time.sleep(1)
                i+=1
            # monitor threads
            st=time.time(); et=0
            nl=threading.enumerate()
            while(len(nl) > 1):
                time.sleep(1)
                nt=time.time()
                nl=threading.enumerate()
                et=int(nt-st)
                if et > 1200:    # start printing status once 20 minutes have elapsed
                    print "there are", len(nl), "threads running, elapsed time:", et, nl
            print "all threads completed in", et
            print "gen fitness:",
            fl=""
            # note that this will invoke evaluate() again but that won't actually
            # call the fitness function, it will just print the already calculated
            # value
            for ind in pop:
                #fl+="%f " % ind.fitness()  # errors.  _fitness not assigned yet?
                fl+="%f " % ind.score() 
            print fl
        else:
            print "only parallel supported here!"
            sys.exit(0)
        return
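
# A simpler alternative to the threading.enumerate() polling loop above would be
# to join each worker directly (equivalent here, since every worker thread is
# appended to thl before start()), at the cost of the periodic status printout:
#   for t in thl:
#       t.join()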

if __name__ == "__main__":
    if 1:
        try:
            import psyco
            psyco.profile()
        except ImportError:
            print "psyco not installed.  It may speed things up.  See http://psyco.sf.net"

    # retest a good model lots of times:
    if 1:
        parms={'esthalprob':        0.41941611021757125,
               'allF':              0.18008129209280013,
               'allD':              0.2536232104897499,
               'L1L5prob':          0.41860541701316833,
               'initRSE':           (1.0, 0.0),
               'eresprob':          0.18882301449775696,
               'allmaxcond':        0.039583496451377868,
               'interspike':        90,
               'trainseq':          (2, 2, 2, 2, 2),
               'datatoERprob':      1.0,
               'initUSE':           (0.26000000000000001, 0.13),
               'nstimpat':          8,
               'stimdur':           0.5,
               'thaltoL1prob':      0.5,
               'l1emprob':          0.77312687337398533,
               'poshebbdeltause':   0.11259778544306755,
               'L6L4prob':          0.15926451981067657,
               'eremprob':          0.67414405941963196,
               'stiminterval':      1.0,
               'thalsize':          10,
               'ntapecells':        4,
               'DoInhib':           True,
               'neghebbdeltause':   0.13090649253129957,
               'keytoL1prob':       1.0,
               'otherlaysize':      10}

        Retest(parms, nruns=1, startn=462, nprocs=2)
        #Retest(parms, nruns=1, startn=461, nprocs=1, dorun=False)
        sys.exit(0)

    # run a GA search for a good model:
    if 0:
        global threadnum
        global evalnum
        evalnum=1
        genelist=[]
        genelist.append((ga.gene.list_gene([2, 3, 4, 5, 6, 7, 8]), 'ntapecells'))
        genelist.append((ga.gene.float_gene((.005, .05)), 'allmaxcond'))
        genelist.append((ga.gene.float_gene((.005,.3)), 'allF'))
        genelist.append((ga.gene.float_gene((.005,.3)), 'allD'))
        genelist.append((ga.gene.float_gene((.005,.3)), 'poshebbdeltause'))
        genelist.append((ga.gene.float_gene((.005,.3)), 'neghebbdeltause'))
        genelist.append((ga.gene.list_gene([(float(x)/1000.0) for x in range(200, 801, 100)]), 'stimdur'))
        genelist.append((ga.gene.list_gene(range(20, 120, 10)), 'interspike'))

        genelist.append((ga.gene.float_gene((.05, 1.0)), 'eresprob'))
        genelist.append((ga.gene.float_gene((.05, 1.0)), 'eremprob'))
        genelist.append((ga.gene.float_gene((.05, 1.0)), 'l1emprob'))
        genelist.append((ga.gene.float_gene((.05, 1.0)), 'esthalprob'))
        genelist.append((ga.gene.float_gene((0.0, 1.0)), 'L1L5prob'))
        genelist.append((ga.gene.float_gene((0.0, 1.0)), 'L6L4prob'))

        all_genes=[]
        #global mychrom
        for (g, parmname) in genelist: 
            all_genes+=g.replicate(1)
            mychrom.append(parmname)
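        # mychrom now holds the parm name for each gene position, so a fitness
        # function can rebuild an E0-style parms dict from the gene values,
        # roughly (a sketch; exact value access depends on scipy's ga API):
        #   parms=dict(zip(mychrom, genevalues))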

        # overriding evaluate is tricky.  it is called lots of places and does actual
        # work on list.

        # genome.evaluate invokes performance()
        this_genome.performance=E0Fitness
        # in order to do this I would have to change the genomes evaluate() method since that calls
        # evaluator.evaluate(self).  I would have to add an arg of n
        #this_genome.evaluator=my_genome_evaluator()
        gnm=this_genome(all_genes)
        #gnm.evaluator=my_genome_evaluator()
        pop=ga.population.population(gnm)
        pop.evaluator=my_pop_evaluator()
        galg=ga.algorithm.galg(pop)
        settings={'pop_size': 16, 'p_replace': .8, 'p_cross': .8, 'p_mutate': 'gene',
                  'p_deviation': 0., 'gens': 64, 'rand_seed': 0, 'rand_alg': 'CMRG'}
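        # (assumed scipy ga knobs: 16 individuals per generation, 64 generations,
        # 0.8 crossover probability, per-gene mutation)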
        galg.settings.update(settings)
        #gnm.evaluator.evaluate(None, 2)        # test
        galg.evolve()
        print "best:", galg.pop.best()
