__author__ = 'chunk'

from ...mfeat import *
from ...mmodel import *
from ...mspark import SC
from ...common import *
from .theanoutil import *

import numpy as np
from sklearn import cross_validation

import theano
from theano import function, config, shared, sandbox
import theano.tensor as T

import os
import gzip
import cPickle

package_dir = os.path.dirname(os.path.abspath(__file__))


class ModelTHEANO(ModelBase):
    """
    Some notes:

    1.<http://deeplearning.net/software/theano/faq.html>
    Error allocating 1411344000 bytes of device memory (out of memory). Driver report 203563008 bytes free and 3220897792 bytes total

    This scenario arises when an operation requires allocation of a large contiguous block of memory but no blocks of sufficient size are available.
    GPUs do not have virtual memory and as such all allocations must be assigned to a continuous memory region. CPUs do not have this limitation because or their support for virtual memory. Multiple allocations on a GPU can result in memory fragmentation which can makes it more difficult to find contiguous regions of memory of sufficient size during subsequent memory allocations.


    """

    def __init__(self, toolset='cnn', sc=None):
        ModelBase.__init__(self)
        self.toolset = toolset
        self.sparker = sc
        self.model = None

    def _shared_dataset(self, data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch everytime
        is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        data_x, data_y = data_xy
        shared_x = theano.shared(np.asarray(data_x,
                                            dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(np.asarray(data_y,
                                            dtype=theano.config.floatX),
                                 borrow=borrow)
        # When storing data on the GPU it has to be stored as floats,
        # therefore we store the labels as ``floatX`` as well (``shared_y``
        # does exactly that). But during our computations we need them as
        # ints (we use the labels as indices, which makes no sense for
        # floats), so instead of returning ``shared_y`` directly we cast it
        # to int. This little hack lets us get around the issue.
        return shared_x, T.cast(shared_y, 'int32')
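
    def _load_dataset(self, dataset):
        """Load a pickled dataset and wrap each split as shared variables.

        This helper is a minimal sketch and an assumption, not part of the
        original file: it mirrors the Theano MNIST tutorial loader and shows
        what the otherwise unused ``gzip``/``cPickle`` imports are for. The
        pickle is assumed to hold a ``(train_set, valid_set, test_set)``
        tuple where each split is an ``(X, Y)`` pair.
        """
        opener = gzip.open if dataset.endswith('.gz') else open
        with opener(dataset, 'rb') as f:
            train_set, valid_set, test_set = cPickle.load(f)
        # Wrap each split so Theano can keep it in GPU memory.
        return [self._shared_dataset(split)
                for split in (train_set, valid_set, test_set)]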

    def _train_cnn(self, X=None, Y=None, dataset=os.path.join(package_dir, '../../res/', 'ils_crop.pkl'),
                   learning_rate=0.1, n_epochs=200,
                   nkerns=[20, 50, 50],
                   batch_size=400):

        return train_cnn_example(X, Y, dataset=dataset, learning_rate=learning_rate, n_epochs=n_epochs, nkerns=nkerns,
                                 batch_size=batch_size)


    def train(self, X, Y):
        if self.toolset == 'cnn':
            return self._train_cnn(X, Y)
        else:
            raise Exception("Unknown toolset!")


    def predict(self, feat, model=None):
        if self.toolset == 'cnn':
            return self._predict_cnn(feat, model)
        else:
            raise Exception("Unknown toolset!")


    def test(self, X, Y, model=None):
        if self.toolset == 'cnn':
            return self._test_cnn(X, Y, model)
        else:
            raise Exception("Unknown toolset!")
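

    def _predict_cnn(self, feat, model=None):
        # Minimal sketch, and an assumption: ``_predict_cnn`` is called by
        # ``predict`` above but is not defined in this file. The real
        # implementation depends on what ``train_cnn_example`` (from
        # theanoutil) returns; here we assume the trained model is, or
        # exposes, a compiled Theano function mapping a feature matrix to
        # predicted labels.
        if model is None:
            model = self.model
        if model is None:
            raise Exception("Model not trained or loaded!")
        feat = np.asarray(feat, dtype=theano.config.floatX)
        return model(feat)


    def _test_cnn(self, X, Y, model=None):
        # Minimal sketch (assumption, see _predict_cnn): report plain
        # accuracy of the predicted labels against the ground truth.
        pred = self._predict_cnn(X, model)
        return np.mean(np.asarray(pred) == np.asarray(Y))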