Dataset columns: markdown (string, 0–37k chars), code (string, 1–33.3k chars), path (string, 8–215 chars), repo_name (string, 6–77 chars), license (string, 15 classes), hash (string, 32 chars)
First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k labeled examples and the test set about 19,000. Given these sizes, it should be possible to train models quickly on any machine.
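The cells below assume the notebook's standard import cell, which isn't included in this excerpt; it looks roughly like the sketch below (names match what the later cells use).

```python
# Assumed import cell (not shown in this excerpt).
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
```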
url = 'https://commondatastorage.googleapis.com/books1000/' last_percent_reported = None data_root = '.' # Change me to store data elsewhere def download_progress_hook(count, blockSize, totalSize): """A hook to report the progress of a download. This is mostly intended for users with slow internet connections. Reports every 5% change in download progress. """ global last_percent_reported percent = int(count * blockSize * 100 / totalSize) if last_percent_reported != percent: if percent % 5 == 0: sys.stdout.write("%s%%" % percent) sys.stdout.flush() else: sys.stdout.write(".") sys.stdout.flush() last_percent_reported = percent def maybe_download(filename, expected_bytes, force=False): """Download a file if not present, and make sure it's the right size.""" dest_filename = os.path.join(data_root, filename) if force or not os.path.exists(dest_filename): print('Attempting to download:', filename) filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook) print('\nDownload Complete!') statinfo = os.stat(dest_filename) if statinfo.st_size == expected_bytes: print('Found and verified', dest_filename) else: raise Exception( 'Failed to verify ' + dest_filename + '. Can you get to it with a browser?') return dest_filename train_filename = maybe_download('notMNIST_large.tar.gz', 247336696) test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
2ff2c765cef08d033a661824924fc4f8
Extract the dataset from the compressed .tar.gz file. This should give you a set of directories, labeled A through J.
num_classes = 10 np.random.seed(133) def maybe_extract(filename, force=False): root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz if os.path.isdir(root) and not force: # You may override by setting force=True. print('%s already present - Skipping extraction of %s.' % (root, filename)) else: print('Extracting data for %s. This may take a while. Please wait.' % root) tar = tarfile.open(filename) sys.stdout.flush() tar.extractall(data_root) tar.close() data_folders = [ os.path.join(root, d) for d in sorted(os.listdir(root)) if os.path.isdir(os.path.join(root, d))] if len(data_folders) != num_classes: raise Exception( 'Expected %d folders, one per class. Found %d instead.' % ( num_classes, len(data_folders))) print(data_folders) return data_folders train_folders = maybe_extract(train_filename) test_folders = maybe_extract(test_filename)
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
537d58662c950449e0641619e14bde9c
Problem 1 Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display.
# Solution for Problem 1
import random

print('Displaying images of train folders')
# Loop through the train folders and display a random image from each folder
for path in train_folders:
    image_file = os.path.join(path, random.choice(os.listdir(path)))
    display(Image(filename=image_file))

print('Displaying images of test folders')
# Loop through the test folders and display a random image from each folder
for path in test_folders:
    image_file = os.path.join(path, random.choice(os.listdir(path)))
    display(Image(filename=image_file))
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
8a186640f29fe482c795f5625ce853bd
Now let's load the data in a more manageable format. Since, depending on your computer setup, you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk, and curate them independently. Later we'll merge them into a single dataset of manageable size. We'll convert the entire dataset into a 3D array (image index, x, y) of floating-point values, normalized to have approximately zero mean and a standard deviation of about 0.5 to make training easier down the road. A few images might not be readable; we'll just skip them.
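Concretely, the next cell maps each raw byte value $x \in [0, 255]$ to

$$x' = \frac{x - 255/2}{255} \in [-0.5,\ 0.5],$$

which gives roughly zero mean and a spread well below one.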
image_size = 28 # Pixel width and height. pixel_depth = 255.0 # Number of levels per pixel. def load_letter(folder, min_num_images): """Load the data for a single letter label.""" image_files = os.listdir(folder) dataset = np.ndarray(shape=(len(image_files), image_size, image_size), dtype=np.float32) print(folder) num_images = 0 for image in image_files: image_file = os.path.join(folder, image) try: image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth if image_data.shape != (image_size, image_size): raise Exception('Unexpected image shape: %s' % str(image_data.shape)) dataset[num_images, :, :] = image_data num_images = num_images + 1 except IOError as e: print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.') dataset = dataset[0:num_images, :, :] if num_images < min_num_images: raise Exception('Many fewer images than expected: %d < %d' % (num_images, min_num_images)) print('Full dataset tensor:', dataset.shape) print('Mean:', np.mean(dataset)) print('Standard deviation:', np.std(dataset)) return dataset def maybe_pickle(data_folders, min_num_images_per_class, force=False): dataset_names = [] for folder in data_folders: set_filename = folder + '.pickle' dataset_names.append(set_filename) if os.path.exists(set_filename) and not force: # You may override by setting force=True. print('%s already present - Skipping pickling.' % set_filename) else: print('Pickling %s.' % set_filename) dataset = load_letter(folder, min_num_images_per_class) try: with open(set_filename, 'wb') as f: pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL) except Exception as e: print('Unable to save data to', set_filename, ':', e) return dataset_names train_datasets = maybe_pickle(train_folders, 45000) test_datasets = maybe_pickle(test_folders, 1800)
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
d920b00460e105990f329124d70272e3
Problem 2 Let's verify that the data still looks good. Displaying a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot.
# Solution for Problem 2
def show_first_image(datasets):
    for pickl in datasets:
        print('Showing a first image from pickle ', pickl)
        try:
            with open(pickl, 'rb') as f:
                letter_set = pickle.load(f)
            plt.imshow(letter_set[0])
            plt.show()  # render one figure per pickle instead of only the last one
        except Exception as e:
            print('Unable to show image from pickle ', pickl, ':', e)
            raise

print('From Training dataset')
show_first_image(train_datasets)
print('From Test Dataset')
show_first_image(test_datasets)
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
4053054aae21eb1f98417db69fb81a4f
Problem 3 Another check: we expect the data to be balanced across classes. Verify that.
def show_dataset_shape(datasets): for pickl in datasets: try: with open(pickl, 'rb') as f: letter_set = pickle.load(f) print('Shape of pickle ', pickl, 'is', np.shape(letter_set)) except Exception as e: print('Unable to show image from pickle ', pickl, ':', e) raise print('Shape for Training set') show_dataset_shape(train_datasets) print('Shape for Test set') show_dataset_shape(test_datasets)
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
d5996cb9f0196e7aa559203bd24442e2
Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed. The labels will be stored into a separate array of integers 0 through 9. Also create a validation dataset for hyperparameter tuning.
def make_arrays(nb_rows, img_size): if nb_rows: dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32) labels = np.ndarray(nb_rows, dtype=np.int32) else: dataset, labels = None, None return dataset, labels def merge_datasets(pickle_files, train_size, valid_size=0): num_classes = len(pickle_files) valid_dataset, valid_labels = make_arrays(valid_size, image_size) train_dataset, train_labels = make_arrays(train_size, image_size) vsize_per_class = valid_size // num_classes tsize_per_class = train_size // num_classes start_v, start_t = 0, 0 end_v, end_t = vsize_per_class, tsize_per_class end_l = vsize_per_class+tsize_per_class for label, pickle_file in enumerate(pickle_files): try: with open(pickle_file, 'rb') as f: letter_set = pickle.load(f) # let's shuffle the letters to have random validation and training set np.random.shuffle(letter_set) if valid_dataset is not None: valid_letter = letter_set[:vsize_per_class, :, :] valid_dataset[start_v:end_v, :, :] = valid_letter valid_labels[start_v:end_v] = label start_v += vsize_per_class end_v += vsize_per_class train_letter = letter_set[vsize_per_class:end_l, :, :] train_dataset[start_t:end_t, :, :] = train_letter train_labels[start_t:end_t] = label start_t += tsize_per_class end_t += tsize_per_class except Exception as e: print('Unable to process data from', pickle_file, ':', e) raise return valid_dataset, valid_labels, train_dataset, train_labels """ train_size = 200000 valid_size = 10000 test_size = 10000 """ train_size = 20000 valid_size = 1000 test_size = 1000 valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets( train_datasets, train_size, valid_size) _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size) print('Training:', train_dataset.shape, train_labels.shape) print('Validation:', valid_dataset.shape, valid_labels.shape) print('Testing:', test_dataset.shape, test_labels.shape)
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
da73be361c134977828904272fb128d1
Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
def randomize(dataset, labels): permutation = np.random.permutation(labels.shape[0]) shuffled_dataset = dataset[permutation,:,:] shuffled_labels = labels[permutation] return shuffled_dataset, shuffled_labels train_dataset, train_labels = randomize(train_dataset, train_labels) test_dataset, test_labels = randomize(test_dataset, test_labels) valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
4b7a3afa5d155d69f6d1e353fa17b3f3
Problem 4 Convince yourself that the data is still good after shuffling!
print('Printing Train, validation and test labels after shuffling')

def print_first_10_labels(labels):
    # Collect the first 10 integer labels (0-9 correspond to letters 'A'-'J')
    printing_labels = [labels[i] for i in range(10)]
    print(printing_labels)

print_first_10_labels(train_labels)
print_first_10_labels(test_labels)
print_first_10_labels(valid_labels)
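Printing labels alone doesn't show whether images and labels still line up. Below is a minimal visual spot-check, assuming the merged arrays from the cells above; `spot_check` is a hypothetical helper name, not part of the assignment starter code.

```python
import matplotlib.pyplot as plt
import numpy as np

def spot_check(dataset, labels, n=6):
    """Display n random images with their letter labels after shuffling."""
    idx = np.random.choice(len(dataset), n, replace=False)
    fig, axes = plt.subplots(1, n, figsize=(2 * n, 2))
    for ax, i in zip(axes, idx):
        ax.imshow(dataset[i], cmap='gray')
        ax.set_title(chr(ord('A') + labels[i]))  # labels 0-9 map to 'A'-'J'
        ax.axis('off')
    plt.show()

spot_check(train_dataset, train_labels)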
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
c46fdf27f0d1af3d063f7cace542756a
Finally, let's save the data for later reuse:
pickle_file = os.path.join(data_root, 'notMNIST.pickle') try: f = open(pickle_file, 'wb') save = { 'train_dataset': train_dataset, 'train_labels': train_labels, 'valid_dataset': valid_dataset, 'valid_labels': valid_labels, 'test_dataset': test_dataset, 'test_labels': test_labels, } pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise statinfo = os.stat(pickle_file) print('Compressed pickle size:', statinfo.st_size)
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
700eee30e854b2f62482a199d06a9052
Problem 5 By construction, this dataset might contain a lot of overlapping samples, including training data that's also contained in the validation and test set! Overlap between training and test can skew the results if you expect to use your model in an environment where there is never an overlap, but it is actually fine if you expect to see training samples recur when you use it. Measure how much overlap there is between training, validation and test samples (a sketch of one way to do this follows the classifier cell below). Optional questions: - What about near duplicates between datasets? (images that are almost identical) - Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments. Problem 6 Let's get an idea of what an off-the-shelf classifier can give you on this data. It's always good to check that there is something to learn, and that the problem is not so trivial that a canned solution solves it. Train a simple model on this data using 50, 100, 1000 and 5000 training samples. Hint: you can use the LogisticRegression model from sklearn.linear_model. Optional question: train an off-the-shelf model on all the data!
logreg_model_clf = LogisticRegression()

# Flatten the 28x28 images into 784-dimensional vectors for sklearn
nsamples, nx, ny = train_dataset.shape
d2_train_dataset = train_dataset.reshape((nsamples, nx*ny))
logreg_model_clf.fit(d2_train_dataset, train_labels)

from sklearn.metrics import accuracy_score

nsamples, nx, ny = valid_dataset.shape
d2_valid_dataset = valid_dataset.reshape((nsamples, nx*ny))
print("validation accuracy:", accuracy_score(valid_labels, logreg_model_clf.predict(d2_valid_dataset)))

nsamples, nx, ny = test_dataset.shape
d2_test_dataset = test_dataset.reshape((nsamples, nx*ny))
print("test accuracy:", accuracy_score(test_labels, logreg_model_clf.predict(d2_test_dataset)))
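The cell above only covers Problem 6. For the overlap check in Problem 5, one simple approach (a sketch for exact duplicates only; `image_hashes` is a hypothetical helper, not part of the assignment starter code) is to hash each image buffer and intersect the hash sets:

```python
import hashlib

def image_hashes(dataset):
    """Hash each 28x28 image so exact duplicates can be counted cheaply."""
    return set(hashlib.sha1(img.tobytes()).hexdigest() for img in dataset)

train_h = image_hashes(train_dataset)
valid_h = image_hashes(valid_dataset)
test_h = image_hashes(test_dataset)

print('train/valid overlap:', len(train_h & valid_h))
print('train/test overlap: ', len(train_h & test_h))
print('valid/test overlap: ', len(valid_h & test_h))
```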
machine-learning/deep-learning/udacity/ud730/1_notmnist.ipynb
pk-ai/training
mit
ac55573f88f8aecb42213c101aea44b0
Now the Hotels
url = 'http://www.bringfido.com/lodging/city/new_haven_ct_us' r = Render(url) result = r.frame.toHtml() #QString should be converted to string before processed by lxml formatted_result = str(result.toAscii()) tree = html.fromstring(formatted_result) #Now using correct Xpath we are fetching URL of archives archive_links = tree.xpath('//*[@id="results_list"]/div') print(archive_links) print('') for lnk in archive_links: print(lnk.xpath('div[2]/h1/a/text()')[0]) print(lnk.text_content()) print('*'*25)
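The `Render` class used above isn't defined in these cells. It follows the common PyQt4/QtWebKit pattern for loading JavaScript-rendered pages before handing the DOM to lxml; a rough sketch is below, and the `update_url` helper used later is also an assumption rather than a documented API.

```python
import sys
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebPage

class Render(QWebPage):
    """Load a page with QtWebKit so the JavaScript-built DOM can be parsed with lxml."""
    def __init__(self, url):
        self.app = QApplication(sys.argv)
        QWebPage.__init__(self)
        self.loadFinished.connect(self._load_finished)
        self.mainFrame().load(QUrl(url))
        self.app.exec_()

    def update_url(self, url):
        # Assumed helper matching the later r.update_url(...) calls:
        # load a new page and block until rendering has finished.
        self.mainFrame().load(QUrl(url))
        self.app.exec_()

    def _load_finished(self, result):
        self.frame = self.mainFrame()
        self.app.quit()
```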
code/.ipynb_checkpoints/bf_qt_scraping-checkpoint.ipynb
mattgiguere/doglodge
mit
0dad4a002a85d4128e82972f628210f4
Now Get the Links
links = [] for lnk in archive_links: print(lnk.xpath('div/h1/a/@href')[0]) links.append(lnk.xpath('div/h1/a/@href')[0]) print('*'*25) lnk.xpath('//*/div/h1/a/@href')[0] links
code/.ipynb_checkpoints/bf_qt_scraping-checkpoint.ipynb
mattgiguere/doglodge
mit
18ce64fb9059e3e60bef3a13c81d1e55
Loading Reviews Next, we want to step through each page, and scrape the reviews for each hotel.
url_base = 'http://www.bringfido.com' r.update_url(url_base+links[0]) result = r.frame.toHtml() #QString should be converted to string before processed by lxml formatted_result = str(result.toAscii()) tree = html.fromstring(formatted_result) hotel_description = tree.xpath('//*[@class="body"]/text()') details = tree.xpath('//*[@class="address"]/text()') address = details[0] csczip = details[1] phone = details[2] #Now using correct Xpath we are fetching URL of archives reviews = tree.xpath('//*[@class="review_container"]') texts = [] titles = [] authors = [] ratings = [] print(reviews) print('') for rev in reviews: titles.append(rev.xpath('div/div[1]/text()')[0]) authors.append(rev.xpath('div/div[2]/text()')[0]) texts.append(rev.xpath('div/div[3]/text()')[0]) ratings.append(rev.xpath('div[2]/img/@src')[0].split('/')[-1][0:1]) print(rev.xpath('div[2]/img/@src')[0].split('/')[-1][0:1]) titles authors texts ratings
code/.ipynb_checkpoints/bf_qt_scraping-checkpoint.ipynb
mattgiguere/doglodge
mit
b27fd89a7b47769c94c1673e7278e7a4
Load software and filename definitions
from fretbursts import * init_notebook() from IPython.display import display
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
47e0ff2b92861240cb04601f3ff5b247
Data folder:
data_dir = './data/singlespot/' import os data_dir = os.path.abspath(data_dir) + '/' assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
56aec3452991a4b4c8d60e799b2b2a35
List of data files:
from glob import glob file_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f) ## Selection for POLIMI 2012-11-26 datatset labels = ['17d', '27d', '7d', '12d', '22d'] files_dict = {lab: fname for lab, fname in zip(labels, file_list)} files_dict data_id
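Note that `data_id` is referenced at the end of the cell above but never assigned in this excerpt; in the original workflow it is presumably injected as a notebook parameter. For a standalone run you could set it by hand, e.g. as below (the value '12d' is only inferred from this notebook's file name):

```python
# Hypothetical parameter cell; '12d' is inferred from the notebook file name.
data_id = '12d'
```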
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
79cdb3fcac01636b7697430b571f2e97
Data load Initial loading of the data:
d = loader.photon_hdf5(filename=files_dict[data_id])
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
37a7891b19abfcee9823b14e076093aa
Load the leakage coefficient from disk:
leakage_coeff_fname = 'results/usALEX - leakage coefficient DexDem.csv' leakage = np.loadtxt(leakage_coeff_fname) print('Leakage coefficient:', leakage)
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
889e8252b6e71b1b4bf80b26caee5cdf
Load the direct excitation coefficient ($d_{exAA}$) from disk:
dir_ex_coeff_fname = 'results/usALEX - direct excitation coefficient dir_ex_aa.csv' dir_ex_aa = np.loadtxt(dir_ex_coeff_fname) print('Direct excitation coefficient (dir_ex_aa):', dir_ex_aa)
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
be92387d4839a369474cd7dd42b27de9
Load the gamma-factor ($\gamma$) from disk:
gamma_fname = 'results/usALEX - gamma factor - all-ph.csv' gamma = np.loadtxt(gamma_fname) print('Gamma-factor:', gamma)
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
7f9e9e1041a92484c706218c95e1799a
Update d with the correction coefficients:
d.leakage = leakage d.dir_ex = dir_ex_aa d.gamma = gamma
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
38a860887e206256996248ed17d08d95
Laser alternation selection At this point we have only the timestamps and the detector numbers:
d.ph_times_t[0][:3], d.ph_times_t[0][-3:]#, d.det_t print('First and last timestamps: {:10,} {:10,}'.format(d.ph_times_t[0][0], d.ph_times_t[0][-1])) print('Total number of timestamps: {:10,}'.format(d.ph_times_t[0].size))
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
5584608570616e4f821b77cea82471eb
We need to define some parameters: donor and acceptor channels, excitation period, and donor and acceptor excitation windows:
d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
d6b0e8ccc8f4f27a9a6d0c450e1ff77f
We should check if everything is OK with an alternation histogram:
plot_alternation_hist(d)
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
591741af6097c466bacd173d78f58abf
If the plot looks good we can apply the parameters with:
loader.alex_apply_period(d) print('D+A photons in D-excitation period: {:10,}'.format(d.D_ex[0].sum())) print('D+A photons in A-excitation period: {:10,}'.format(d.A_ex[0].sum()))
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
b6ec7eaac6b92af0a599000bff505cff
Measurement info All the measurement data is in the d variable; we can print it:
d
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
109da54e2aa3543e6bc7d97edbe35b35
Or check the measurement duration:
d.time_max
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
fd48a016b647f118d43ea51b84c9bea3
Compute background Compute the background using an automatic threshold:
d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7) dplot(d, timetrace_bg) d.rate_m, d.rate_dd, d.rate_ad, d.rate_aa
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
e0525a70fbe478c2adb58e9b74fd9180
Burst search and selection
d.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel('all')) print(d.ph_sel) dplot(d, hist_fret); # if data_id in ['7d', '27d']: # ds = d.select_bursts(select_bursts.size, th1=20) # else: # ds = d.select_bursts(select_bursts.size, th1=30) ds = d.select_bursts(select_bursts.size, add_naa=False, th1=30) n_bursts_all = ds.num_bursts[0] def select_and_plot_ES(fret_sel, do_sel): ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel) ds_do = ds.select_bursts(select_bursts.ES, **do_sel) bpl.plot_ES_selection(ax, **fret_sel) bpl.plot_ES_selection(ax, **do_sel) return ds_fret, ds_do ax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1) if data_id == '7d': fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False) do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True) ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) elif data_id == '12d': fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False) do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False) ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) elif data_id == '17d': fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, rect=False) do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False) ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) elif data_id == '22d': fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False) do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True) ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) elif data_id == '27d': fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False) do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True) ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) n_bursts_do = ds_do.num_bursts[0] n_bursts_fret = ds_fret.num_bursts[0] n_bursts_do, n_bursts_fret d_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret) print('D-only fraction:', d_only_frac) dplot(ds_fret, hist2d_alex, scatter_alpha=0.1); dplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False);
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
e9a36a91fcab39bbd7c605b763bfe2ca
Donor Leakage fit
bandwidth = 0.03 E_range_do = (-0.1, 0.15) E_ax = np.r_[-0.2:0.401:0.0002] E_pr_do_kde = bext.fit_bursts_kde_peak(ds_do, bandwidth=bandwidth, weights='size', x_range=E_range_do, x_ax=E_ax, save_fitter=True) mfit.plot_mfit(ds_do.E_fitter, plot_kde=True, bins=np.r_[E_ax.min(): E_ax.max(): bandwidth]) plt.xlim(-0.3, 0.5) print("%s: E_peak = %.2f%%" % (ds.ph_sel, E_pr_do_kde*100))
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
6d9c983847f1643e58d6c245e932ff94
Burst sizes
nt_th1 = 50 dplot(ds_fret, hist_size, which='all', add_naa=False) xlim(-0, 250) plt.axvline(nt_th1) Th_nt = np.arange(35, 120) nt_th = np.zeros(Th_nt.size) for i, th in enumerate(Th_nt): ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th) nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th plt.figure() plot(Th_nt, nt_th) plt.axvline(nt_th1) nt_mean = nt_th[np.where(Th_nt == nt_th1)][0] nt_mean
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
ad1ee085cd102f175cb08ce0dd9f997d
Fret fit Max position of the Kernel Density Estimation (KDE):
E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size') E_fitter = ds_fret.E_fitter E_fitter.histogram(bins=np.r_[-0.1:1.1:0.03]) E_fitter.fit_histogram(mfit.factory_gaussian(center=0.5)) E_fitter.fit_res[0].params.pretty_print() fig, ax = plt.subplots(1, 2, figsize=(14, 4.5)) mfit.plot_mfit(E_fitter, ax=ax[0]) mfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1]) print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100)) display(E_fitter.params*100)
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
b4105322e1aa02420fb80c4d6fd1ec48
Weighted mean of $E$ of each burst:
ds_fret.fit_E_m(weights='size')
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
e14f6e8c2409d12293fed2b56182b5a1
Gaussian fit (no weights):
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
62e8b440f2386b568c90251a0a39a931
Gaussian fit (using burst size as weights):
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size') E_kde_w = E_fitter.kde_max_pos[0] E_gauss_w = E_fitter.params.loc[0, 'center'] E_gauss_w_sig = E_fitter.params.loc[0, 'sigma'] E_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0])) E_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr E_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
73541258f960ab996a73fb24d66a352b
Stoichiometry fit Max position of the Kernel Density Estimation (KDE):
S_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True) S_fitter = ds_fret.S_fitter S_fitter.histogram(bins=np.r_[-0.1:1.1:0.03]) S_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5) fig, ax = plt.subplots(1, 2, figsize=(14, 4.5)) mfit.plot_mfit(S_fitter, ax=ax[0]) mfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1]) print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100)) display(S_fitter.params*100) S_kde = S_fitter.kde_max_pos[0] S_gauss = S_fitter.params.loc[0, 'center'] S_gauss_sig = S_fitter.params.loc[0, 'sigma'] S_gauss_err = float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0])) S_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr S_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
e99df1d5affce07801654509080b2af0
The maximum-likelihood fit for a Gaussian population is just the sample mean and standard deviation:
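In symbols, for the $N$ burst values $S_i$:

$$\hat\mu = \frac{1}{N}\sum_{i=1}^{N} S_i, \qquad \hat\sigma = \sqrt{\frac{1}{N}\sum_{i=1}^{N}\left(S_i - \hat\mu\right)^2},$$

which is exactly what `S.mean()` and `S.std()` compute in the next cell.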
S = ds_fret.S[0] S_ml_fit = (S.mean(), S.std()) S_ml_fit
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
bd96aa0228a11a1dc199ae1f776e29a7
Computing the weighted mean and weighted standard deviation we get:
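With weights $w_i$ these are

$$\bar S_w = \frac{\sum_i w_i S_i}{\sum_i w_i}, \qquad \sigma_w = \sqrt{\frac{\sum_i w_i \left(S_i - \bar S_w\right)^2}{\sum_i w_i}},$$

matching the `np.dot` expressions in the next cell.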
weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.) S_mean = np.dot(weights, S)/weights.sum() S_std_dev = np.sqrt( np.dot(weights, (S - S_mean)**2)/weights.sum()) S_wmean_fit = [S_mean, S_std_dev] S_wmean_fit
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
3b302254346f04ccdfffb8f09bcec338
Save data to file
sample = data_id
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
d8cf23eeaedb92cb5e9e2e859002c406
The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.
variables = ('sample n_bursts_all n_bursts_do n_bursts_fret ' 'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr ' 'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr ' 'E_pr_do_kde nt_mean\n')
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
07a910fbdcbd8cfcd06fff742b1ef178
This is just a trick to format the different variables:
variables_csv = variables.replace(' ', ',') fmt_float = '{%s:.6f}' fmt_int = '{%s:d}' fmt_str = '{%s}' fmt_dict = {**{'sample': fmt_str}, **{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}} var_dict = {name: eval(name) for name in variables.split()} var_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\n' data_str = var_fmt.format(**var_dict) print(variables_csv) print(data_str) # NOTE: The file name should be the notebook name but with .csv extension with open('results/usALEX-5samples-E-corrected-all-ph.csv', 'a') as f: f.seek(0, 2) if f.tell() == 0: f.write(variables_csv) f.write(data_str)
out_notebooks/usALEX-5samples-E-corrected-all-ph-out-12d.ipynb
tritemio/multispot_paper
mit
00ef80b1b69f079c18b9da97abaf8bc3
Data folder:
data_dir = './data/singlespot/'
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
f42126936d752a819da0ed54e682f39d
Check that the folder exists:
import os data_dir = os.path.abspath(data_dir) + '/' assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
08c3702500c8721fa2faadd92a9ff552
List of data files in data_dir:
from glob import glob file_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f) file_list ## Selection for POLIMI 2012-12-6 dataset # file_list.pop(2) # file_list = file_list[1:-2] # display(file_list) # labels = ['22d', '27d', '17d', '12d', '7d'] ## Selection for P.E. 2012-12-6 dataset # file_list.pop(1) # file_list = file_list[:-1] # display(file_list) # labels = ['22d', '27d', '17d', '12d', '7d'] ## Selection for POLIMI 2012-11-26 datatset labels = ['17d', '27d', '7d', '12d', '22d'] files_dict = {lab: fname for lab, fname in zip(labels, file_list)} files_dict ph_sel_map = {'all-ph': Ph_sel('all'), 'AexAem': Ph_sel(Aex='Aem')} ph_sel = ph_sel_map[ph_sel_name] data_id, ph_sel_name
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
11348005908a5f1d10eceff28cf7506c
Laser alternation selection At this point we have only the timestamps and the detector numbers:
d.ph_times_t, d.det_t
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
d74b75388064abbb5661a8fb58d115fd
If the plot looks good we can apply the parameters with:
loader.alex_apply_period(d)
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
92bd1049f044e09f078770a25769f03f
Burst search and selection
from mpl_toolkits.axes_grid1 import AxesGrid import lmfit print('lmfit version:', lmfit.__version__) assert d.dir_ex == 0 assert d.leakage == 0 d.burst_search(m=10, F=6, ph_sel=ph_sel) print(d.ph_sel, d.num_bursts) ds_sa = d.select_bursts(select_bursts.naa, th1=30) ds_sa.num_bursts
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
f144a4545d7c7f4319a79015db1b6e84
Preliminary selection and plots
mask = (d.naa[0] - np.abs(d.na[0] + d.nd[0])) > 30 ds_saw = d.select_bursts_mask_apply([mask]) ds_sas0 = ds_sa.select_bursts(select_bursts.S, S2=0.10) ds_sas = ds_sa.select_bursts(select_bursts.S, S2=0.15) ds_sas2 = ds_sa.select_bursts(select_bursts.S, S2=0.20) ds_sas3 = ds_sa.select_bursts(select_bursts.S, S2=0.25) ds_st = d.select_bursts(select_bursts.size, add_naa=True, th1=30) ds_sas.num_bursts dx = ds_sas0 size = dx.na[0] + dx.nd[0] s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True) s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0]) plot(s_ax, s_hist, '-o', alpha=0.5) dx = ds_sas size = dx.na[0] + dx.nd[0] s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True) s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0]) plot(s_ax, s_hist, '-o', alpha=0.5) dx = ds_sas2 size = dx.na[0] + dx.nd[0] s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True) s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0]) plot(s_ax, s_hist, '-o', alpha=0.5) dx = ds_sas3 size = dx.na[0] + dx.nd[0] s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True) s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0]) plot(s_ax, s_hist, '-o', alpha=0.5) plt.title('(nd + na) for A-only population using different S cutoff'); dx = ds_sa alex_jointplot(dx); dplot(ds_sa, hist_S)
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
98c552a29c40b85efe10975551a57837
A-direct excitation fitting To extract the A-direct excitation coefficient we need to fit the S values for the A-only population. The S value for the A-only population is fitted with different methods: - Histogram fit with 2 Gaussians or with 2 asymmetric Gaussians (an asymmetric Gaussian has the right and left sides of the peak decreasing according to different sigmas). - KDE maximum In the following we apply these methods using different selection or weighting schemes to reduce the amount of FRET population and make fitting of the A-only population easier. Even selection Here the A-only and FRET populations are evenly selected.
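The conversion used throughout the cells below rests on the approximation that, for the A-only population, there is essentially no donor signal, so $S \approx n_a/(n_a + n_{aa})$ and the fitted $S$ peak maps to the direct-excitation coefficient as

$$d_{exAA} = \frac{n_a}{n_{aa}} = \frac{S}{1-S},$$

which is the `S/(1 - S)` expression applied after each fit.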
dx = ds_sa bin_width = 0.03 bandwidth = 0.03 bins = np.r_[-0.2 : 1 : bin_width] x_kde = np.arange(bins.min(), bins.max(), 0.0002) ## Weights weights = None ## Histogram fit fitter_g = mfit.MultiFitter(dx.S) fitter_g.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth]) fitter_g.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4)) S_hist_orig = fitter_g.hist_pdf S_2peaks = fitter_g.params.loc[0, 'p1_center'] dir_ex_S2p = S_2peaks/(1 - S_2peaks) print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p) ## KDE fitter_g.calc_kde(bandwidth=bandwidth) fitter_g.find_kde_max(x_kde, xmin=0, xmax=0.15) S_peak = fitter_g.kde_max_pos[0] dir_ex_S_kde = S_peak/(1 - S_peak) print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde) fig, ax = plt.subplots(1, 2, figsize=(14, 4.5)) mfit.plot_mfit(fitter_g, ax=ax[0]) ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks*100)) mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=True) ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak*100)); ## 2-Asym-Gaussian fitter_ag = mfit.MultiFitter(dx.S) fitter_ag.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth]) fitter_ag.fit_histogram(model = mfit.factory_two_asym_gaussians(p1_center=0.1, p2_center=0.4)) #print(fitter_ag.fit_obj[0].model.fit_report()) S_2peaks_a = fitter_ag.params.loc[0, 'p1_center'] dir_ex_S2pa = S_2peaks_a/(1 - S_2peaks_a) print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2pa) fig, ax = plt.subplots(1, 2, figsize=(14, 4.5)) mfit.plot_mfit(fitter_g, ax=ax[0]) ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks*100)) mfit.plot_mfit(fitter_ag, ax=ax[1]) ax[1].set_title('2-Asym-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_a*100));
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
169b510bac694f3f03951b313ce129f7
Zero threshold on nd Select bursts with: $$n_d < 0$$.
dx = ds_sa.select_bursts(select_bursts.nd, th1=-100, th2=0) fitter = bext.bursts_fitter(dx, 'S') fitter.fit_histogram(model = mfit.factory_gaussian(center=0.1)) S_1peaks_th = fitter.params.loc[0, 'center'] dir_ex_S1p = S_1peaks_th/(1 - S_1peaks_th) print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S1p) mfit.plot_mfit(fitter) plt.xlim(-0.1, 0.6)
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
815a88879911f62a6a409a125ca61aaf
Selection 1 Bursts are weighted using $w = f(S)$, where the function $f(S)$ is a Gaussian fitted to the $S$ histogram of the FRET population.
dx = ds_sa ## Weights weights = 1 - mfit.gaussian(dx.S[0], fitter_g.params.loc[0, 'p2_center'], fitter_g.params.loc[0, 'p2_sigma']) weights[dx.S[0] >= fitter_g.params.loc[0, 'p2_center']] = 0 ## Histogram fit fitter_w1 = mfit.MultiFitter(dx.S) fitter_w1.weights = [weights] fitter_w1.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth]) fitter_w1.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4)) S_2peaks_w1 = fitter_w1.params.loc[0, 'p1_center'] dir_ex_S2p_w1 = S_2peaks_w1/(1 - S_2peaks_w1) print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w1) ## KDE fitter_w1.calc_kde(bandwidth=bandwidth) fitter_w1.find_kde_max(x_kde, xmin=0, xmax=0.15) S_peak_w1 = fitter_w1.kde_max_pos[0] dir_ex_S_kde_w1 = S_peak_w1/(1 - S_peak_w1) print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w1) def plot_weights(x, weights, ax): ax2 = ax.twinx() x_sort = x.argsort() ax2.plot(x[x_sort], weights[x_sort], color='k', lw=4, alpha=0.4) ax2.set_ylabel('Weights'); fig, ax = plt.subplots(1, 2, figsize=(14, 4.5)) mfit.plot_mfit(fitter_w1, ax=ax[0]) mfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False) plot_weights(dx.S[0], weights, ax=ax[0]) ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w1*100)) mfit.plot_mfit(fitter_w1, ax=ax[1], plot_model=False, plot_kde=True) mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False) plot_weights(dx.S[0], weights, ax=ax[1]) ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w1*100));
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
ec54b38b38a9c63b3b559573c8a90cc7
Selection 2 Bursts are here weighted using weights $w$: $$w = n_{aa} - |n_a + n_d|$$
## Weights sizes = dx.nd[0] + dx.na[0] #- dir_ex_S_kde_w3*dx.naa[0] weights = dx.naa[0] - abs(sizes) weights[weights < 0] = 0 ## Histogram fitter_w4 = mfit.MultiFitter(dx.S) fitter_w4.weights = [weights] fitter_w4.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth]) fitter_w4.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4)) S_2peaks_w4 = fitter_w4.params.loc[0, 'p1_center'] dir_ex_S2p_w4 = S_2peaks_w4/(1 - S_2peaks_w4) print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w4) ## KDE fitter_w4.calc_kde(bandwidth=bandwidth) fitter_w4.find_kde_max(x_kde, xmin=0, xmax=0.15) S_peak_w4 = fitter_w4.kde_max_pos[0] dir_ex_S_kde_w4 = S_peak_w4/(1 - S_peak_w4) print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w4) fig, ax = plt.subplots(1, 2, figsize=(14, 4.5)) mfit.plot_mfit(fitter_w4, ax=ax[0]) mfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False) #plot_weights(dx.S[0], weights, ax=ax[0]) ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w4*100)) mfit.plot_mfit(fitter_w4, ax=ax[1], plot_model=False, plot_kde=True) mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False) #plot_weights(dx.S[0], weights, ax=ax[1]) ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w4*100));
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
0b728ab52c324ad96e31d6584a4e4a51
Selection 3 Bursts are here selected according to: $$n_{aa} - |n_a + n_d| > 30$$
mask = (d.naa[0] - np.abs(d.na[0] + d.nd[0])) > 30 ds_saw = d.select_bursts_mask_apply([mask]) print(ds_saw.num_bursts) dx = ds_saw ## Weights weights = None ## 2-Gaussians fitter_w5 = mfit.MultiFitter(dx.S) fitter_w5.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth]) fitter_w5.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4)) S_2peaks_w5 = fitter_w5.params.loc[0, 'p1_center'] dir_ex_S2p_w5 = S_2peaks_w5/(1 - S_2peaks_w5) print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w5) ## KDE fitter_w5.calc_kde(bandwidth=bandwidth) fitter_w5.find_kde_max(x_kde, xmin=0, xmax=0.15) S_peak_w5 = fitter_w5.kde_max_pos[0] S_2peaks_w5_fiterr = fitter_w5.fit_res[0].params['p1_center'].stderr dir_ex_S_kde_w5 = S_peak_w5/(1 - S_peak_w5) print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w5) ## 2-Asym-Gaussians fitter_w5a = mfit.MultiFitter(dx.S) fitter_w5a.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth]) fitter_w5a.fit_histogram(model = mfit.factory_two_asym_gaussians(p1_center=0.05, p2_center=0.3)) S_2peaks_w5a = fitter_w5a.params.loc[0, 'p1_center'] dir_ex_S2p_w5a = S_2peaks_w5a/(1 - S_2peaks_w5a) #print(fitter_w5a.fit_obj[0].model.fit_report(min_correl=0.5)) print('Fitted direct excitation (na/naa) [2-Asym-Gauss]:', dir_ex_S2p_w5a) fig, ax = plt.subplots(1, 3, figsize=(19, 4.5)) mfit.plot_mfit(fitter_w5, ax=ax[0]) mfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False) ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w5*100)) mfit.plot_mfit(fitter_w5, ax=ax[1], plot_model=False, plot_kde=True) mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False) ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w5*100)); mfit.plot_mfit(fitter_w5a, ax=ax[2]) mfit.plot_mfit(fitter_g, ax=ax[2], plot_model=False, plot_kde=False) ax[2].set_title('2-Asym-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w5a*100));
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
c2f522b324f5e787cf64226520e11c12
Save data to file
sample = data_id n_bursts_aa = ds_sas.num_bursts[0]
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
0ad2f0683c601e72142ed744807b1f70
The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.
variables = ('sample n_bursts_aa dir_ex_S1p dir_ex_S_kde dir_ex_S2p dir_ex_S2pa ' 'dir_ex_S2p_w1 dir_ex_S_kde_w1 dir_ex_S_kde_w4 dir_ex_S_kde_w5 dir_ex_S2p_w5 dir_ex_S2p_w5a ' 'S_2peaks_w5 S_2peaks_w5_fiterr\n')
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
4f72236585ee3ad099e3d3d6adfb7853
This is just a trick to format the different variables:
variables_csv = variables.replace(' ', ',') fmt_float = '{%s:.6f}' fmt_int = '{%s:d}' fmt_str = '{%s}' fmt_dict = {**{'sample': fmt_str}, **{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}} var_dict = {name: eval(name) for name in variables.split()} var_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\n' data_str = var_fmt.format(**var_dict) print(variables_csv) print(data_str) # NOTE: The file name should be the notebook name but with .csv extension with open('results/usALEX-5samples-PR-raw-dir_ex_aa-fit-%s.csv' % ph_sel_name, 'a') as f: f.seek(0, 2) if f.tell() == 0: f.write(variables_csv) f.write(data_str)
out_notebooks/usALEX-5samples-PR-raw-dir_ex_aa-fit-out-AexAem-17d.ipynb
tritemio/multispot_paper
mit
84a823ed6c01418fc1a65cfdada27abe
1. Get Arxiv data about machine learning Write an API querier and extract papers with the terms machine learning or artificial intelligence. Get 2000 results... and play nice!
class Arxiv_querier(): ''' This class takes as an input a query and the number of results, and returns all the parsed results. Includes routines to deal with multiple pages of results. ''' def __init__(self,base_url="http://export.arxiv.org/api/query?"): ''' Initialise ''' self.base_url = base_url def query(self,query_string,max_results=100,wait_time=3): ''' Query the base url ''' #Attribute query string #Load base URL base_url = self.base_url #Prepare query string processed_query = re.sub(' ','+',query_string) self.query_string="_".join(query_string.split(" ")) start=0 pages = 0 #Run the query and store results for as long as the number of results is bigger than the max results keep_running = True result_store = [] while keep_running==True: pages +=1 print(pages) #Query url (NB the start arg, which will change as we go through different #pages) query_url = base_url+'search_query=all:{q}&start={s}&max_results={max_res}'.format( q=processed_query,s=start,max_res=max_results) #Download source = requests.get(query_url) #Parse the xml and get the entries (papers) parsed = feedparser.parse(source.content) #Extract entries entries = parsed['entries'] #If the number of entries is bigger than the maximum number of results #this means we need to go to another page. We do that by offseting the #start with max results result_store.append(entries) if len(entries)==max_results: start+=max_results #If we have less than max results this means we have run out of #results and we toggle the keep_running switch off. if len(entries)<max_results: keep_running=False time.sleep(wait_time) #Save results in a flat list self.entry_results = [x for el in result_store for x in el] def extract_data(self): ''' Here we extract data from the entries ''' #Load entries entries = self.entry_results #Create df output = pd.concat([pd.DataFrame({ 'query':self.query_string, 'id':x['id'], 'link':x['link'], 'title':x['title'], 'authors':", ".join([el['name'] for el in x['authors']]), 'summary':x['summary'], 'updated':x['updated'], 'published':x['published'], 'category':x['arxiv_primary_category']['term'], 'pdf':str([el['href'] for el in x['links'] if el['type']=='application/pdf'][0] )},index=[0]) for x in entries]).reset_index(drop=True) output['year_published'] = [x.split("-")[0] for x in output['published']] self.output_df = output query_terms = ['artificial intelligence','machine learning','deep learning'] #There are some inconsistencies in the number of results so we run the query three times for each #term and remove duplicated results def extract_arxiv_data(term,max_results=1000,wait_time=10, tests=3): ''' This function initialises the Arxiv_querier class, extracts the data and outputs it ''' print(term) collected = [] #We collect the data thrice for i in np.arange(tests): print('run'+ ' ' +str(i)) initialised = Arxiv_querier() initialised.query(term,max_results,wait_time) initialised.extract_data() out = initialised.output_df collected.append(out) #We concatenate the dfs and remove the duplicates. output = pd.concat(collected) output_no_dupes = output.drop_duplicates('id') #Return both return([output,output_no_dupes]) arxiv_ai_results_three = [extract_arxiv_data(term=q) for q in query_terms] all_papers = pd.concat([x[1] for x in arxiv_ai_results_three]).drop_duplicates('id').reset_index(drop=True) print(all_papers.shape) all_papers.head() all_papers.to_csv(int_data+'/{today}_ai_papers.csv'.format(today=today_str),index=False)
notebooks/ml_topic_analysis_exploration.ipynb
Juan-Mateos/coll_int_ai_case
mit
e57b184ed38d7c6af5f9b24cad661e6e
2. Some exploratory analysis
from nltk.corpus import stopwords from nltk.tokenize import word_tokenize, sent_tokenize, RegexpTokenizer, PunktSentenceTokenizer from nltk.stem import WordNetLemmatizer, SnowballStemmer, PorterStemmer import scipy import ast import string as st from bs4 import BeautifulSoup import gensim from gensim.models.coherencemodel import CoherenceModel from sklearn.feature_extraction.text import TfidfVectorizer from itertools import product stopwords_c = stopwords.words('english') stemmer = PorterStemmer() lemmatizer= WordNetLemmatizer() #Read papers all_papers = pd.read_csv(int_data+'/19_8_2017_ai_papers.csv'.format(today=today_str)) #Let's begin by looking at years #When where they published? #Year distribution year_pubs = all_papers['year_published'].value_counts() year_pubs.index = [int(x) for x in year_pubs.index] fig,ax = plt.subplots(figsize=(10,5)) year_pubs_sorted = year_pubs[sorted(year_pubs.index)] year_pubs_subset = year_pubs_sorted[year_pubs_sorted.index>1991] ax.plot(np.arange(1993,2018),year_pubs_subset.cumsum(),color='red') ax.bar(np.arange(1993,2018),year_pubs_subset) ax.hlines(xmin=1993,xmax=2017,y=[10000,20000,30000,40000],colors='green',linestyles='dashed',alpha=0.7) ax.set_title("Papers on AI, ML and DL, total per year (bar) and cumulative (red)",size=14) #What are the categories of the papers? Are we capturing what we think we are capturing #Top 20 all_papers['category'].value_counts()[:20]
notebooks/ml_topic_analysis_exploration.ipynb
Juan-Mateos/coll_int_ai_case
mit
a5d61fd9c07f8dba0dde544e8278cf0d
See <a href='https://arxiv.org/help/api/user-manual'>here</a> for abbreviations of categories. In a nutshell, AI is AI, LG is 'Learning', CV is 'Computer Vision', 'CL' is 'computation and language' and NE is 'Neural and Evolutionary computing'. stat.ML is kind of self-explanatory. We seem to be picking up the main things.
#NB do we want to remove hyphens? punct = re.sub('-','',st.punctuation) def comp_sentence(sentence): ''' Takes a sentence and pre-processes it. The output is the sentence as a bag of words ''' #Remove line breaks and hyphens sentence = re.sub('\n',' ',sentence) sentence = re.sub('-',' ',sentence) #Lowercase and tokenise text_lowered = [x.lower() for x in sentence.split(" ")] #Remove signs and digits text_no_signs_digits = ["".join([x for x in el if x not in punct+st.digits]) for el in text_lowered] #Remove stop words, single letters text_stopped = [w for w in text_no_signs_digits if w not in stopwords_c and len(w)>1] #Stem text_lemmatised = [lemmatizer.lemmatize(w) for w in text_stopped] #Output return(text_lemmatised) #Process text clean_corpus = [comp_sentence(x) for x in all_papers['summary']] #We remove rate words word_freqs = pd.Series([x for el in clean_corpus for x in el]).value_counts() word_freqs[:30] rare_words = word_freqs.index[word_freqs<=2] rare_words[:10]
notebooks/ml_topic_analysis_exploration.ipynb
Juan-Mateos/coll_int_ai_case
mit
6e7b3522ffb4f502cd9adc753500f596
Lots of the rare words seem to be typos and so forth. We remove them
#Removing rare words clean_corpus_no_rare = [[x for x in el if x not in rare_words] for el in clean_corpus]
notebooks/ml_topic_analysis_exploration.ipynb
Juan-Mateos/coll_int_ai_case
mit
e65b63c12c04234706f75a702ddf17f7
2 NLP (topic modelling & word embeddings)
#Identify 2-grams (frequent in science!) bigram_transformer = gensim.models.Phrases(clean_corpus_no_rare) #Train the model on the corpus #Let's do a bit of grid search #model = gensim.models.Word2Vec(bigram_transformer[clean_corpus], size=360, window=15, min_count=2, iter=20) model.most_similar('ai_safety') model.most_similar('complexity') model.most_similar('github') #Create 3 different dictionaries and bows depending on word sizes def remove_words_below_threshold(corpus,threshold): ''' Takes a list of terms and removes any which are below a threshold of occurrences ''' #Produce token frequencies token_frequencies = pd.Series([x for el in corpus for x in el]).value_counts() #Identify tokens to drop (below a threshold) tokens_to_drop = token_frequencies.index[token_frequencies<=threshold] #Processed corpus processed_corpus = [[x for x in el if x not in tokens_to_drop] for el in corpus] #Dictionary dictionary = gensim.corpora.Dictionary(processed_corpus) corpus_bow = [dictionary.doc2bow(x) for x in processed_corpus] return([dictionary,corpus_bow,processed_corpus]) #Initial model run to see what comes out. #Transform corpus to bigrams transformed_corpus = bigram_transformer[clean_corpus] corpora_to_process = {str(x):remove_words_below_threshold(transformed_corpus,x) for x in [1,2,5,10]} #Need to turn this into a function. #Topic modelling #Parameters for Grid search. lda_params = list(product([100,200,300],[2,5])) #Model container lda_models = [] for x in lda_params: #Print stage print('{x}_{y}'.format(x=x[0],y=x[1])) #Load corpus and dict dictionary = corpora_to_process[str(x[1])][0] corpus_bow = corpora_to_process[str(x[1])][1] corpus = corpora_to_process[str(x[1])][2] print('training') #Train model mod = gensim.models.LdaModel(corpus_bow,num_topics=x[0],id2word=dictionary, passes=10,iterations=50) print('coherence') #Extract coherence cm = CoherenceModel(mod,texts=corpus, dictionary=dictionary,coherence='u_mass') #Get value try: coherence_value = cm.get_coherence() except: print('coherence_error') coherence_value='error' lda_models.append([x,mod,[coherence_value,cm]]) with open(mod_path+'/{t}_ai_topic_models.p'.format(t=today_str),'wb') as outfile: pickle.dump(lda_models,outfile) #Visualiase model performance model_eval = pd.DataFrame([[x[0][0],x[0][1],x[2][0]] for x in lda_models],columns=['topics','word_lim','coherence']) fig,ax = plt.subplots(figsize=(10,5)) cols = ['red','green','blue'] legs = [] for num,x in enumerate(set(model_eval['word_lim'])): subset = model_eval.loc[[z == x for z in model_eval['word_lim']],:] ax.plot(subset.loc[:,'topics'],subset.loc[:,'coherence'],color=cols[num-1]) legs.append([cols[num-1],x]) ax.legend(labels=[x[1] for x in legs],title='Min word count') ax.set_title('Model performance with different parameters') with open(mod_path+'/19_8_2017_ai_topic_models.p','rb') as infile: lda_models = pickle.load(infile) check_model= lda_models[1][1] #Explore topics via LDAvis import pyLDAvis.gensim pyLDAvis.enable_notebook() pyLDAvis.gensim.prepare( #Insert best model/corpus/topics here check_model, corpora_to_process[str(5)][1], corpora_to_process[str(5)][0]) #Can we extract the relevant terms for the topics as in Sievert and Shirley in order to name them? 
#First - create a matrix with top 30 terms per topic top_30_kws = [check_model.get_topic_terms(topicid=n,topn=1000) for n in np.arange(0,100)] #Keyword df where the columns are tokens and the rows are topics top_30_kws_df = pd.concat([pd.DataFrame([x[1] for x in el], index=[x[0] for x in el]) for el in top_30_kws], axis=1).fillna(0).T.reset_index(drop=True) #This is the dictionary selected_dictionary = corpora_to_process[str(5)][0] #Total number of terms in the document total_terms = np.sum([vals for vals in selected_dictionary.dfs.values()]) #Appearances of different terms document_freqs = pd.Series([v for v in selected_dictionary.dfs.values()], index=[k for k in selected_dictionary.dfs.keys()])[top_30_kws_df.columns]/total_terms #Normalise the terms (divide the vector of probabilities of each keywords in each topic by the totals) top_30_kws_normalised = top_30_kws_df.apply(lambda x: x/document_freqs,axis=0) #Now we want to extract, for each topic, the relevance score. def relevance_score(prob_in_topic,prob_in_corpus,id2word_lookup,lambda_par = 0.6): ''' Combines the probabilities using the definition in Sievert and Shirley and returns the top 5 named #terms for each topic ''' #Create dataframe combined = pd.concat([prob_in_topic,prob_in_corpus],axis=1) combined.columns=['prob_in_topic','prob_in_corpus'] #Create relevance metric combined['relevance'] = lambda_par*combined['prob_in_topic'] + (1-lambda_par)*combined['prob_in_corpus'] #Top words top_ids = list(combined.sort_values('relevance',ascending=False).index[:5]) #Top words top_words = "_".join([id2word_lookup[this_id] for this_id in top_ids]) return(top_words) relevance_scores = [relevance_score(top_30_kws_df.iloc[n,:], top_30_kws_normalised.iloc[n,:], dictionary.id2token,lambda_par=0.6) for n in np.arange(len(top_30_kws_df))] %%time #Create a df with the topic predictions. paper_preds = check_model[corpora_to_process[str(5)][1]] paper_topics_df = pd.concat([pd.DataFrame([x[1] for x in el],index=[x[0] for x in el]) for el in paper_preds], axis=1).T #Replace NAs with zeros and drop pointless index paper_topics_df.fillna(value=0,inplace=True) paper_topics_df.reset_index(drop=True,inplace=True) paper_topics_df.columns = relevance_scores paper_topics_df.to_csv(int_data+'/{t}_paper_topic_mix.csv'.format(t=today_str),index=False) #paper_topics_df = pd.read_csv(int_data+'/{t}_paper_topic_mix.csv') #Quick test of Deep learning papers #These are papers with a topic that seems to capture deep learning dl_papers = [x>0.05 for x in paper_topics_df['network_training_model_deep_deep_learning']] dl_papers_metadata = pd.concat([pd.Series(dl_papers),all_papers],axis=1) paper_frequencies = pd.crosstab(dl_papers_metadata.year_published,dl_papers_metadata[0]) paper_frequencies.columns=['no_dl','dl'] fig,ax = plt.subplots(figsize=(10,5)) paper_frequencies.plot.bar(stacked=True,ax=ax) ax.set_title('Number of papers in the DL \'topic\'') ax.legend(labels=['Not ANN/DL related','NN/DL topic >0.05'])
notebooks/ml_topic_analysis_exploration.ipynb
Juan-Mateos/coll_int_ai_case
mit
785fb57951c954e0661649fe7a2a747a
Some of this is interesting, but it doesn't seem to be picking up the policy-related terms (safety, discrimination). Next stages: focus on policy-related terms. Can we look for papers in keyword dictionaries identified through the word embeddings? Obtain Google Scholar data
#How many authors are there in the data? Can we collect all their institutions from Google Scholar paper_authors = pd.Series([x for el in all_papers['authors'] for x in el.split(", ")]) paper_authors_unique = paper_authors.drop_duplicates() len(paper_authors_unique)
notebooks/ml_topic_analysis_exploration.ipynb
Juan-Mateos/coll_int_ai_case
mit
4e46c0ea73599e33af1ddf23a4996b32
We have 68,000 authors. It might take a while to get their data from Google Scholar
#Top authors and frequencies authors_freq = paper_authors.value_counts() fig,ax=plt.subplots(figsize=(10,3)) ax.hist(authors_freq,bins=30) ax.set_title('Distribution of publications') #Pretty skewed distribution! print(authors_freq.describe()) np.sum(authors_freq>2)
notebooks/ml_topic_analysis_exploration.ipynb
Juan-Mateos/coll_int_ai_case
mit
44d86c964fccf06c344f9c7a38618e9d
Fewer than 10,000 authors have 3+ papers in the data
%%time
# Test run
import scholarly

@ratelim.patient(max_calls=30, time_interval=60)
def get_scholar_data(scholarly_object):
    '''Pull name, affiliation and interests for the first Google Scholar match.'''
    try:
        scholarly_object = next(scholarly_object)
        metadata = {}
        metadata['name'] = scholarly_object.name
        metadata['affiliation'] = scholarly_object.affiliation
        metadata['interests'] = scholarly_object.interests
        return(metadata)
    except:
        return('nothing')

#Extract information from each query (it is a generator)
#Get data
#ml_author_gscholar=[]
for num, x in enumerate(paper_authors_unique[1484:]):
    if num % 100 == 0:
        print(str(num)+":"+x)
    result = get_scholar_data(scholarly.search_author(x))
    ml_author_gscholar.append(result)

len(ml_author_gscholar)
notebooks/ml_topic_analysis_exploration.ipynb
Juan-Mateos/coll_int_ai_case
mit
5d8549fc8c1c9f106b80f46333a76760
1. General Mixture Models pomegranate has a very efficient implementation of mixture models, particularly Gaussian mixture models. Let's take a look at how fast pomegranate is versus sklearn, and then see how much faster parallelization can make it.
n, d, k = 1000000, 5, 3 X, y = create_dataset(n, d, k) print "sklearn GMM" %timeit GaussianMixture(n_components=k, covariance_type='full', max_iter=15, tol=1e-10).fit(X) print print "pomegranate GMM" %timeit GeneralMixtureModel.from_samples(MultivariateGaussianDistribution, k, X, max_iterations=15, stop_threshold=1e-10) print print "pomegranate GMM (4 jobs)" %timeit GeneralMixtureModel.from_samples(MultivariateGaussianDistribution, k, X, n_jobs=4, max_iterations=15, stop_threshold=1e-10)
tutorials/old/Tutorial_7_Parallelization.ipynb
jmschrei/pomegranate
mit
7158a0d0fda8819f27ff0cf1762d11c0
It looks like on a large dataset not only is pomegranate faster than sklearn at performing 15 iterations of EM on 1 million 5-dimensional datapoints with 3 clusters, but the parallelization is able to help in speeding things up. Let's now take a look at the time it takes to make predictions using GMMs. Let's fit the model to a small amount of data, and then predict a larger amount of data drawn from the same underlying distributions.
d, k = 25, 2 X, y = create_dataset(1000, d, k) a = GaussianMixture(k, n_init=1, max_iter=25).fit(X) b = GeneralMixtureModel.from_samples(MultivariateGaussianDistribution, k, X, max_iterations=25) del X, y n = 1000000 X, y = create_dataset(n, d, k) print "sklearn GMM" %timeit -n 1 a.predict_proba(X) print print "pomegranate GMM" %timeit -n 1 b.predict_proba(X) print print "pomegranate GMM (4 jobs)" %timeit -n 1 b.predict_proba(X, n_jobs=4)
tutorials/old/Tutorial_7_Parallelization.ipynb
jmschrei/pomegranate
mit
f69d331cb420b58af4f3948dd4753016
It looks like pomegranate can be slightly slower than sklearn when using a single processor, but that it can be parallelized to get faster performance. At the same time, predictions at this level happen so quickly (millions per second) that this may not be the most reliable test for parallelization. To ensure that we're getting the exact same results, just faster, let's subtract the predictions from each other and make sure that the sum is equal to 0.
print (b.predict_proba(X) - b.predict_proba(X, n_jobs=4)).sum()
tutorials/old/Tutorial_7_Parallelization.ipynb
jmschrei/pomegranate
mit
8142bc4f5a30801de0a3b536027070a0
Great, no difference between the two. Let's now make sure that pomegranate and sklearn are learning basically the same thing. Let's fit both models to some 2-dimensional, 2-component data and make sure that they both extract the underlying clusters by plotting them.
d, k = 2, 2 X, y = create_dataset(1000, d, k, alpha=2) a = GaussianMixture(k, n_init=1, max_iter=25).fit(X) b = GeneralMixtureModel.from_samples(MultivariateGaussianDistribution, k, X, max_iterations=25) y1, y2 = a.predict(X), b.predict(X) plt.figure(figsize=(16,6)) plt.subplot(121) plt.title("sklearn clusters", fontsize=14) plt.scatter(X[y1==0, 0], X[y1==0, 1], color='m', edgecolor='m') plt.scatter(X[y1==1, 0], X[y1==1, 1], color='c', edgecolor='c') plt.subplot(122) plt.title("pomegranate clusters", fontsize=14) plt.scatter(X[y2==0, 0], X[y2==0, 1], color='m', edgecolor='m') plt.scatter(X[y2==1, 0], X[y2==1, 1], color='c', edgecolor='c')
tutorials/old/Tutorial_7_Parallelization.ipynb
jmschrei/pomegranate
mit
a86925764262f13149ab5580eb45b21a
It looks like we're getting the same basic results for the two. The two algorithms are initialized a bit differently, and so it can be difficult to compare the results directly, but they extract roughly the same clusters. 3. Multivariate Gaussian HMM Now let's move on to training a hidden Markov model with multivariate Gaussian emissions with a diagonal covariance matrix. We'll randomly generate some Gaussian distributed numbers and use pomegranate with one, two, or four threads to fit our model to the data.
X = numpy.random.randn(1000, 500, 50)

print "pomegranate Gaussian HMM (1 job)"
%timeit -n 1 -r 1 HiddenMarkovModel.from_samples(NormalDistribution, 5, X, max_iterations=5)
print

print "pomegranate Gaussian HMM (2 jobs)"
%timeit -n 1 -r 1 HiddenMarkovModel.from_samples(NormalDistribution, 5, X, max_iterations=5, n_jobs=2)
print

print "pomegranate Gaussian HMM (4 jobs)"
%timeit -n 1 -r 1 HiddenMarkovModel.from_samples(NormalDistribution, 5, X, max_iterations=5, n_jobs=4)
tutorials/old/Tutorial_7_Parallelization.ipynb
jmschrei/pomegranate
mit
ab9c8570f2c04ccac08d9e4d42b8fa83
All we had to do was pass in the n_jobs parameter to the fit function in order to get a speed improvement. It looks like we're getting a really good speed improvement, as well! This is mostly because the HMM algorithms perform a lot more operations than the other models, and so spend the vast majority of time with the GIL released. You may not notice as strong speedups when using a MultivariateGaussianDistribution because BLAS uses multithreaded operations already internally, even when only one job is specified. Now let's look at the prediction function to make sure that we're getting speedups there as well. You'll have to use a wrapper function to parallelize the predictions for an HMM because it returns an annotated sequence rather than a single value like a classic machine learning model might.
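The predict_proba wrapper used in the next cell is defined earlier in the original tutorial and is not shown here; a minimal sketch of what such a helper could look like (the name, the joblib-based parallelism, and the per-sequence call pattern are assumptions, not the tutorial's actual code):

```python
from joblib import Parallel, delayed

def predict_proba(model, X, n_jobs=1):
    # Apply the HMM's per-sequence predict_proba across all sequences,
    # optionally with several threads. A threading backend is reasonable here
    # because pomegranate releases the GIL during inference.
    return Parallel(n_jobs=n_jobs, backend='threading')(
        delayed(model.predict_proba)(sequence) for sequence in X
    )
```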
model = HiddenMarkovModel.from_samples(NormalDistribution, 5, X, max_iterations=2, verbose=False) print "pomegranate Gaussian HMM (1 job)" %timeit predict_proba(model, X) print print "pomegranate Gaussian HMM (2 jobs)" %timeit predict_proba(model, X, n_jobs=2)
tutorials/old/Tutorial_7_Parallelization.ipynb
jmschrei/pomegranate
mit
e9927e10fa37915c50c03569213394bf
Great, we're getting a really good speedup on that as well! It looks like the parallel processing is more efficient with a bigger, more complex model than with a simple one. This makes sense, because all inference and training is more complex, and so there is more time with the GIL released than with simpler operations. 4. Mixture of Hidden Markov Models Let's stack another layer onto this model by making it a mixture of these hidden Markov models, instead of a single one. At this point we're sticking a multivariate Gaussian HMM into a mixture and we're going to train this big thing in parallel.
def create_model(mus): n = mus.shape[0] starts = numpy.zeros(n) starts[0] = 1. ends = numpy.zeros(n) ends[-1] = 0.5 transition_matrix = numpy.zeros((n, n)) distributions = [] for i in range(n): transition_matrix[i, i] = 0.5 if i < n - 1: transition_matrix[i, i+1] = 0.5 distribution = IndependentComponentsDistribution([NormalDistribution(mu, 1) for mu in mus[i]]) distributions.append(distribution) model = HiddenMarkovModel.from_matrix(transition_matrix, distributions, starts, ends) return model def create_mixture(mus): hmms = [create_model(mu) for mu in mus] return GeneralMixtureModel(hmms) n, d = 50, 10 mus = [(numpy.random.randn(d, n)*0.2 + numpy.random.randn(n)*2).T for i in range(2)] model = create_mixture(mus) X = numpy.random.randn(400, 150, d) print "pomegranate Mixture of Gaussian HMMs (1 job)" %timeit model.fit(X, max_iterations=5) print model = create_mixture(mus) print "pomegranate Mixture of Gaussian HMMs (2 jobs)" %timeit model.fit(X, max_iterations=5, n_jobs=2)
tutorials/old/Tutorial_7_Parallelization.ipynb
jmschrei/pomegranate
mit
d41646736155f637122e199d99a7818b
Looks like we're getting a really nice speed improvement when training this complex model. Let's take a look now at the time it takes to do inference with it.
model = create_mixture(mus) print "pomegranate Mixture of Gaussian HMMs (1 job)" %timeit model.predict_proba(X) print model = create_mixture(mus) print "pomegranate Mixture of Gaussian HMMs (2 jobs)" %timeit model.predict_proba(X, n_jobs=2)
tutorials/old/Tutorial_7_Parallelization.ipynb
jmschrei/pomegranate
mit
a9e3d7276f512bc4e055330c5b5aa4dc
The inner product of blades in GAlgebra is zero if either operand is a scalar: $$\begin{aligned} {\boldsymbol{A}}_{r}\wedge{\boldsymbol{B}}_{s} &\equiv \left<{\boldsymbol{A}}_{r}{\boldsymbol{B}}_{s}\right>_{r+s} \\ {\boldsymbol{A}}_{r}\cdot{\boldsymbol{B}}_{s} &\equiv \begin{cases} \left<{\boldsymbol{A}}_{r}{\boldsymbol{B}}_{s}\right>_{\left|r-s\right|} & r\text{ and }s \ne 0 \\ 0 & r\text{ or }s = 0 \end{cases} \end{aligned}$$ This definition comes from David Hestenes and Garret Sobczyk, “Clifford Algebra to Geometric Calculus,” Kluwer Academic Publishers, 1984. In some other literature, the inner product is defined without the exceptional case for the scalar part, and the definition above is known as "the modified Hestenes inner product" (this name comes from the source code of GAViewer).
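The symbols used in the cells below (a scalar c, vectors a and b, and general multivectors A and B) are defined in an earlier cell of the notebook that is not shown here. A plausible GAlgebra setup along those lines (the algebra, names, and modes are assumptions on our part) would be:

```python
from galgebra.ga import Ga

# Hypothetical reconstruction of the notebook's setup: a 3D Euclidean algebra
# with a symbolic scalar, two vectors and two general multivectors.
o3d = Ga('e1 e2 e3', g=[1, 1, 1])

c = o3d.mv('c', 'scalar')   # scalar
a = o3d.mv('a', 'vector')   # vector
b = o3d.mv('b', 'vector')   # vector
A = o3d.mv('A', 'mv')       # general multivector
B = o3d.mv('B', 'mv')       # general multivector
```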
c|a a|c c|A A|c
examples/ipython/inner_product.ipynb
arsenovic/galgebra
bsd-3-clause
3138b903c13bff2f8e8a6b4b889045ce
$ab=a \wedge b + a \cdot b$ holds for vectors:
a*b a^b a|b (a*b)-(a^b)-(a|b)
examples/ipython/inner_product.ipynb
arsenovic/galgebra
bsd-3-clause
f648557f5e71db2fd149ca1b54cea7e3
$aA=a \wedge A + a \cdot A$ holds for the products between vectors and multivectors:
a*A a^A a|A (a*A)-(a^A)-(a|A)
examples/ipython/inner_product.ipynb
arsenovic/galgebra
bsd-3-clause
bf0f42f827f6b8bb59f50fdcd0604dfe
$AB=A \wedge B + A \cdot B$ does NOT hold for the products between multivectors and multivectors:
A*B A|B (A*B)-(A^B)-(A|B) (A<B)+(A|B)+(A>B)-A*B
examples/ipython/inner_product.ipynb
arsenovic/galgebra
bsd-3-clause
7d522bc4f7ce01a9656e0d362c637d69
Toolkit: Visualization Functions This class will introduce 3 different visualizations that can be used with classification and regression neural networks. Confusion Matrix - For any type of classification neural network. ROC Curve - For binary classification. Lift Curve - For regression neural networks. The code used to produce these visualizations is shown here:
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc

# Plot a confusion matrix.
# cm is the confusion matrix, names are the names of the classes.
def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(names))
    plt.xticks(tick_marks, names, rotation=45)
    plt.yticks(tick_marks, names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

# Plot an ROC. pred - the predictions, y - the expected output.
def plot_roc(pred, y):
    # Use the y passed in (the original version referenced the global y_test).
    fpr, tpr, _ = roc_curve(y, pred)
    roc_auc = auc(fpr, tpr)

    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC)')
    plt.legend(loc="lower right")
    plt.show()

# Plot a lift curve. pred - the predictions, y - the expected output.
def chart_regression(pred, y):
    # Use the y passed in (the original version referenced the global y_test).
    t = pd.DataFrame({'pred': pred.flatten(), 'y': y.flatten()})
    t.sort_values(by=['y'], inplace=True)

    a = plt.plot(t['y'].tolist(), label='expected')
    b = plt.plot(t['pred'].tolist(), label='prediction')
    plt.ylabel('output')
    plt.legend()
    plt.show()
t81_558_class4_class_reg.ipynb
jbliss1234/ML
apache-2.0
89dcb63bc9b3f82ddcb00adbc9b7c219
Binary Classification Binary classification is used to create a model that classifies between only two classes. These two classes are often called "positive" and "negative". Consider the following program that uses the wcbreast_wdbc dataset to classify if a breast tumor is cancerous (malignant) or not (benign). The iris dataset is not binary, because there are three classes (3 types of iris).
import os import pandas as pd from sklearn.cross_validation import train_test_split import tensorflow.contrib.learn as skflow import numpy as np from sklearn import metrics path = "./data/" filename = os.path.join(path,"wcbreast_wdbc.csv") df = pd.read_csv(filename,na_values=['NA','?']) # Encode feature vector df.drop('id',axis=1,inplace=True) encode_numeric_zscore(df,'mean_radius') encode_text_index(df,'mean_texture') encode_text_index(df,'mean_perimeter') encode_text_index(df,'mean_area') encode_text_index(df,'mean_smoothness') encode_text_index(df,'mean_compactness') encode_text_index(df,'mean_concavity') encode_text_index(df,'mean_concave_points') encode_text_index(df,'mean_symmetry') encode_text_index(df,'mean_fractal_dimension') encode_text_index(df,'se_radius') encode_text_index(df,'se_texture') encode_text_index(df,'se_perimeter') encode_text_index(df,'se_area') encode_text_index(df,'se_smoothness') encode_text_index(df,'se_compactness') encode_text_index(df,'se_concavity') encode_text_index(df,'se_concave_points') encode_text_index(df,'se_symmetry') encode_text_index(df,'se_fractal_dimension') encode_text_index(df,'worst_radius') encode_text_index(df,'worst_texture') encode_text_index(df,'worst_perimeter') encode_text_index(df,'worst_area') encode_text_index(df,'worst_smoothness') encode_text_index(df,'worst_compactness') encode_text_index(df,'worst_concavity') encode_text_index(df,'worst_concave_points') encode_text_index(df,'worst_symmetry') encode_text_index(df,'worst_fractal_dimension') diagnosis = encode_text_index(df,'diagnosis') num_classes = len(diagnosis) # Create x & y for training # Create the x-side (feature vectors) of the training x, y = to_xy(df,'diagnosis') # Split into train/test x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.25, random_state=42) # Create a deep neural network with 3 hidden layers of 10, 20, 10 classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=num_classes, steps=10000) # Early stopping early_stop = skflow.monitors.ValidationMonitor(x_test, y_test, early_stopping_rounds=200, print_steps=50, n_classes=num_classes) # Fit/train neural network classifier.fit(x_train, y_train, early_stop) # Measure accuracy score = metrics.accuracy_score(y, classifier.predict(x)) print("Final accuracy: {}".format(score))
t81_558_class4_class_reg.ipynb
jbliss1234/ML
apache-2.0
b27514c1fc91e6cae12607a32f97055d
Confusion Matrix The confusion matrix is a common visualization for both binary and larger classification problems. Often a model will have difficulty differentiating between two classes. For example, a neural network might be really good at telling the difference between cats and dogs, but not so good at telling the difference between dogs and wolves. The following code generates a confusion matrix:
import numpy as np from sklearn import svm, datasets from sklearn.cross_validation import train_test_split from sklearn.metrics import confusion_matrix pred = classifier.predict(x_test) # Compute confusion matrix cm = confusion_matrix(y_test, pred) np.set_printoptions(precision=2) print('Confusion matrix, without normalization') print(cm) plt.figure() plot_confusion_matrix(cm, diagnosis) # Normalize the confusion matrix by row (i.e by the number of samples # in each class) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print('Normalized confusion matrix') print(cm_normalized) plt.figure() plot_confusion_matrix(cm_normalized, diagnosis, title='Normalized confusion matrix') plt.show()
t81_558_class4_class_reg.ipynb
jbliss1234/ML
apache-2.0
51b80bc6fb2cb8e643b6c969336cdd4e
The above two confusion matrices show the same network. The bottom (normalized) is the type you will normally see. Notice the two labels. The label "B" means benign (no cancer) and the label "M" means malignant (cancer). The left-right (x) axis shows the predicted labels, the top-bottom (y) axis shows the true labels. A perfect model (that never makes an error) has a dark blue diagonal that runs from top-left to bottom-right. To read, consider the top-left square. This square indicates a "true label" of B and also a "predicted label" of B. This is good! The prediction matched the truth. The blueness of this box represents how often "B" is classified correctly. It is not the darkest blue. This is because the square to its right (which is off the perfect diagonal) has some color. That square indicates a truth of "B" but a prediction of "M". The white square, at the bottom-left, indicates a truth of "M" but a prediction of "B". The whiteness indicates this rarely happens. Your conclusion from the above chart is that the model sometimes classifies "B" as "M" (a false positive), but never misclassifies "M" as "B". Always look for the dark diagonal; this is good! ROC Curves ROC curves can be a bit confusing. However, they are very common. It is important to know how to read them. Even their name is confusing. Do not worry about their name, it comes from electrical engineering (EE). Binary classification is common in medical testing. Often you want to diagnose if someone has a disease. This can lead to two types of errors, known as false positives and false negatives: False Positive - Your test (neural network) indicated that the patient had the disease; however, the patient did not have the disease. False Negative - Your test (neural network) indicated that the patient did not have the disease; however, the patient did have the disease. True Positive - Your test (neural network) correctly identified that the patient had the disease. True Negative - Your test (neural network) correctly identified that the patient did not have the disease. Types of errors: Neural networks classify in terms of the probability of the case being positive. However, at what probability do you give a positive result? Is the cutoff 50%? 90%? Where you set this cutoff is called the threshold. Anything above the cutoff is positive, anything below is negative. Setting this cutoff allows the model to be more sensitive or more specific: The following shows a more sensitive cutoff: An ROC curve measures how good a model is regardless of the cutoff. The following shows how to read a ROC chart: The following code shows an ROC chart for the breast cancer neural network. The area under the curve (AUC) is also an important measure. The larger the AUC, the better.
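As a small, self-contained illustration of how the threshold turns predicted probabilities into positive/negative calls (the numbers below are made up and are not from the breast-cancer model):

```python
import numpy as np

# Made-up probabilities and ground truth, for illustration only.
probs = np.array([0.95, 0.60, 0.40, 0.10])   # predicted P(positive)
truth = np.array([1, 0, 1, 0])               # 1 = has the disease

threshold = 0.5
calls = (probs >= threshold).astype(int)

tp = np.sum((calls == 1) & (truth == 1))
fp = np.sum((calls == 1) & (truth == 0))
fn = np.sum((calls == 0) & (truth == 1))
tn = np.sum((calls == 0) & (truth == 0))
print(tp, fp, fn, tn)  # 1 1 1 1 at this cutoff

# Lowering the threshold makes the test more sensitive (fewer false negatives)
# at the price of more false positives; the ROC curve traces this trade-off.
```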
pred = classifier.predict_proba(x_test) pred = pred[:,1] # Only positive cases # print(pred[:,1]) plot_roc(pred,y_test)
t81_558_class4_class_reg.ipynb
jbliss1234/ML
apache-2.0
34809d5975e90ad22255f9db37c6c816
Classification We've already seen multi-class classification, with the iris dataset. Confusion matrices work just fine with 3 classes. The following code generates a confusion matrix for iris.
import os import pandas as pd from sklearn.cross_validation import train_test_split import tensorflow.contrib.learn as skflow import numpy as np path = "./data/" filename = os.path.join(path,"iris.csv") df = pd.read_csv(filename,na_values=['NA','?']) # Encode feature vector encode_numeric_zscore(df,'petal_w') encode_numeric_zscore(df,'petal_l') encode_numeric_zscore(df,'sepal_w') encode_numeric_zscore(df,'sepal_l') species = encode_text_index(df,"species") num_classes = len(species) # Create x & y for training # Create the x-side (feature vectors) of the training x, y = to_xy(df,'species') # Split into train/test x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.25, random_state=45) # as much as I would like to use 42, it gives a perfect result, and a boring confusion matrix! # Create a deep neural network with 3 hidden layers of 10, 20, 10 classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=num_classes, steps=10000) # Early stopping early_stop = skflow.monitors.ValidationMonitor(x_test, y_test, early_stopping_rounds=200, print_steps=50, n_classes=num_classes) # Fit/train neural network classifier.fit(x_train, y_train, early_stop) import numpy as np from sklearn import svm, datasets from sklearn.cross_validation import train_test_split from sklearn.metrics import confusion_matrix pred = classifier.predict(x_test) # Compute confusion matrix cm = confusion_matrix(y_test, pred) np.set_printoptions(precision=2) print('Confusion matrix, without normalization') print(cm) plt.figure() plot_confusion_matrix(cm, species) # Normalize the confusion matrix by row (i.e by the number of samples # in each class) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print('Normalized confusion matrix') print(cm_normalized) plt.figure() plot_confusion_matrix(cm_normalized, species, title='Normalized confusion matrix') plt.show()
t81_558_class4_class_reg.ipynb
jbliss1234/ML
apache-2.0
34421ff1ea103c2cd8fe1f5f380233aa
See the strong diagonal? Iris is easy. See the light blue near the bottom? Sometimes virginica is confused with versicolor. Regression We've already seen regression with the MPG dataset. Regression uses its own set of visualizations, and one of the most common is the lift chart. The following code generates a lift chart.
import tensorflow.contrib.learn as skflow import pandas as pd import os import numpy as np from sklearn import metrics from scipy.stats import zscore path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) # create feature vector missing_median(df, 'horsepower') df.drop('name',1,inplace=True) encode_numeric_zscore(df, 'horsepower') encode_numeric_zscore(df, 'weight') encode_numeric_zscore(df, 'cylinders') encode_numeric_zscore(df, 'displacement') encode_numeric_zscore(df, 'acceleration') encode_text_dummy(df, 'origin') # Encode to a 2D matrix for training x,y = to_xy(df,['mpg']) # Split into train/test x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.25, random_state=42) # Create a deep neural network with 3 hidden layers of 50, 25, 10 regressor = skflow.TensorFlowDNNRegressor(hidden_units=[50, 25, 10], steps=5000) # Early stopping early_stop = skflow.monitors.ValidationMonitor(x_test, y_test, early_stopping_rounds=200, print_steps=50) # Fit/train neural network regressor.fit(x_train, y_train, early_stop) pred = regressor.predict(x_test) chart_regression(pred,y_test)
t81_558_class4_class_reg.ipynb
jbliss1234/ML
apache-2.0
f4563e338d71e9e91f14c32b3d99826b
Reordering the Callendar-Van Dusen equation we obtain the following $$ AT+BT^2+C(T-100)T^3 =\frac{R(T)}{R_0}-1 \enspace,$$ which we can write in matrix form as $Mx=p$, where $$\begin{bmatrix} T_1 & T_1^2 & (T_1-100)T_1^3 \\ T_2 & T_2^2 & (T_2-100)T_2^3 \\ T_3 & T_3^2 & (T_3-100)T_3^3\end{bmatrix} \begin{bmatrix} A\\ B \\ C\end{bmatrix} = \begin{bmatrix} \frac{R(T_1)}{R_0}-1 \\ \frac{R(T_2)}{R_0}-1 \\ \frac{R(T_3)}{R_0}-1\end{bmatrix} \enspace.$$ Because $M$ is square (and nonsingular for three distinct temperatures), we can solve the system for the coefficients directly.
R0=25; M=np.array([[T_exp[0],(T_exp[0])**2,(T_exp[0]-100)*(T_exp[0])**3],[T_exp[1],(T_exp[1])**2,(T_exp[1]-100)*(T_exp[1])**3],[T_exp[2],(T_exp[2])**2,(T_exp[2]-100)*(T_exp[2])**3]]); p=np.array([[(R_exp[0]/R0)-1],[(R_exp[1]/R0)-1],[(R_exp[2]/R0)-1]]); x = np.linalg.solve(M,p) #solve linear equations system np.set_printoptions(precision=3) print('M') print(M) print('\n') print('p') print(p) print('\n') print('x') print(x)
notebooks/Ex_2_3.ipynb
agmarrugo/sensors-actuators
mit
7db5f99f0ac40319bebff90824c39546
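As an aside, the same system can be assembled more compactly with vectorized NumPy; this is just an equivalent sketch that assumes the T_exp, R_exp and R0 values defined in the notebook:

```python
import numpy as np

# Vectorized construction of M and p (equivalent to the element-by-element
# version above; T_exp, R_exp and R0 are assumed to exist as before).
T = np.asarray(T_exp[:3], dtype=float)
M = np.column_stack([T, T**2, (T - 100.0) * T**3])
p = np.asarray(R_exp[:3], dtype=float) / R0 - 1.0
A, B, C = np.linalg.solve(M, p)
print(A, B, C)
```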
We have found the coefficients $A$, $B$, and $C$ necessary to describe the sensor's transfer function. Now we plot it from -200 °C to 600 °C.
A=x[0];B=x[1];C=x[2]; T_range= np.arange(start = -200, stop = 601, step = 1); R_funT= R0*(1+A[0]*T_range+B[0]*(T_range)**2+C[0]*(T_range-100)*(T_range)**3); plt.plot(T_range,R_funT,T_exp[0],R_exp[0],'ro',T_exp[1],R_exp[1],'ro',T_exp[2],R_exp[2],'ro'); plt.ylabel('Sensor resistance [Ohm]') plt.xlabel('Temperature [C]') plt.show()
notebooks/Ex_2_3.ipynb
agmarrugo/sensors-actuators
mit
3243e7aff2174500d9af71e92bfb40ff
Reddy Mikks model Given the following variables: $\begin{aligned} x_1 = \textrm{Tons of exterior paint produced daily} \newline x_2 = \textrm{Tons of interior paint produced daily} \end{aligned}$ and knowing that we want to maximize the profit, where \$5000 is the profit per ton of exterior paint and \$4000 is the profit per ton of interior paint, the Reddy Mikks model is: $$\textrm{Maximize } z = 5x_1 + 4x_2$$ subject to $$6x_1 + 4x_2 \le 24$$ $$x_1 + 2x_2 \le 6$$ $$-x_1 + x_2 \le 1$$ $$x_2 \le 2$$ $$x_1, x_2 \ge 0$$
reddymikks = pywraplp.Solver('Reddy_Mikks', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) x1 = reddymikks.NumVar(0, reddymikks.infinity(), 'x1') x2 = reddymikks.NumVar(0, reddymikks.infinity(), 'x2') reddymikks.Add(6*x1 + 4*x2 <= 24) reddymikks.Add(x1 + 2*x2 <= 6) reddymikks.Add(-x1 + x2 <= 1) reddymikks.Add(x2 <= 2) profit = reddymikks.Objective() profit.SetCoefficient(x1, 5) profit.SetCoefficient(x2, 4) profit.SetMaximization() status = reddymikks.Solve() if status not in [reddymikks.OPTIMAL, reddymikks.FEASIBLE]: raise Exception('No feasible solution found') print("The company should produce",round(x1.solution_value(),2),"tons of exterior paint") print("The company should produce",round(x2.solution_value(),2),"tons of interior paint") print("The optimal profit is", profit.Value(), 'thousand USD')
Linear Programming with OR-Tools.ipynb
rayjustinhuang/DataAnalysisandMachineLearning
mit
f243751ee53efa484f2829c988e4523e
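As a quick sanity check (not part of the original notebook), the known optimum of this classic problem is x1 = 3 and x2 = 1.5 tons, for a profit of 21 thousand USD, and it satisfies every constraint:

```python
# Hand verification of the expected optimum (illustrative only).
x1_opt, x2_opt = 3.0, 1.5
assert 6*x1_opt + 4*x2_opt <= 24
assert x1_opt + 2*x2_opt <= 6
assert -x1_opt + x2_opt <= 1
assert x2_opt <= 2
print(5*x1_opt + 4*x2_opt)  # 21.0 (thousand USD)
```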
More simple problems A company that operates 10 hours a day manufactures two products on three sequential processes. The following data characterizes the problem:
import pandas as pd problemdata = pd.DataFrame({'Process 1': [10, 5], 'Process 2':[6, 20], 'Process 3':[8, 10], 'Unit profit':[20, 30]}) problemdata.index = ['Product 1', 'Product 2'] problemdata
Linear Programming with OR-Tools.ipynb
rayjustinhuang/DataAnalysisandMachineLearning
mit
52d9b715a98068a0e709257b64b2eebd
Where there are 10 hours a day dedicated to production. Process times are given in minutes per unit while profit is given in USD. The optimal mix of the two products would be characterized by the following model: $\begin{aligned} x_1 = \textrm{Units of product 1} \newline x_2 = \textrm{Units of product 2} \end{aligned}$ $$\textrm{Maximize } z = 20x_1 + 30x_2$$ subject to $$\begin{array}{rcl} 10x_1 + 5x_2 \le 600 \newline 6x_1 + 20x_2 \le 600 \newline 8x_1 + 10x_2 \le 600 \newline x_1, x_2 \ge 0 \end{array}$$ (we will assume that continuous solution values are acceptable for this problem)
simpleprod = pywraplp.Solver('Simple_Production', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) x1 = simpleprod.NumVar(0, simpleprod.infinity(), 'x1') x2 = simpleprod.NumVar(0, simpleprod.infinity(), 'x2') for i in problemdata.columns[:-1]: simpleprod.Add(problemdata.loc[problemdata.index[0], i]*x1 + problemdata.loc[problemdata.index[1], i]*x2 <= 600) profit = simpleprod.Objective() profit.SetCoefficient(x1, 20) profit.SetCoefficient(x2, 30) profit.SetMaximization() status = simpleprod.Solve() if status not in [simpleprod.OPTIMAL, simpleprod.FEASIBLE]: raise Exception('No feasible solution found') print("The company should produce",round(x1.solution_value(),2),"units of product 1") print("The company should produce",round(x2.solution_value(),2),"units of product 2") print("The optimal profit is", round(profit.Value(),2), 'USD')
Linear Programming with OR-Tools.ipynb
rayjustinhuang/DataAnalysisandMachineLearning
mit
2fe42172012fa1bc64e4ae1dcc5314e4
1. Download Text8 Corpus
import os.path if not os.path.isfile('text8'): !wget -c http://mattmahoney.net/dc/text8.zip !unzip text8.zip
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
4678a63c9ce82679aaf289c15759fa80
Import & Set up Logging I'm not going to set up logging due to the verbose output it produces in notebooks, but if you want it, set LOGS = True in the cell below.
LOGS = False if LOGS: import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
f0ef7c69ac884ad5564a33378d8f846b
2. Build Word2Vec Model
from gensim.models import Word2Vec, KeyedVectors from gensim.models.word2vec import Text8Corpus # Using params from Word2Vec_FastText_Comparison params = { 'alpha': 0.05, 'size': 100, 'window': 5, 'iter': 5, 'min_count': 5, 'sample': 1e-4, 'sg': 1, 'hs': 0, 'negative': 5 } model = Word2Vec(Text8Corpus('text8'), **params) print(model)
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
fc17a13f3fe28d8680bfe5257cd71311
See the Word2Vec tutorial for how to initialize and save this model. Comparing the traditional implementation, Annoy and Nmslib approximation
# Set up the model and vector that we are using in the comparison from gensim.similarities.index import AnnoyIndexer from gensim.similarities.nmslib import NmslibIndexer model.init_sims() annoy_index = AnnoyIndexer(model, 300) nmslib_index = NmslibIndexer(model, {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100}, {'efSearch': 10}) # Dry run to make sure both indices are fully in RAM vector = model.wv.syn0norm[0] print(model.most_similar([vector], topn=5, indexer=annoy_index)) print(model.most_similar([vector], topn=5, indexer=nmslib_index)) print(model.most_similar([vector], topn=5)) import time import numpy as np def avg_query_time(annoy_index=None, queries=1000): """ Average query time of a most_similar method over 1000 random queries, uses annoy if given an indexer """ total_time = 0 for _ in range(queries): rand_vec = model.wv.syn0norm[np.random.randint(0, len(model.wv.vocab))] start_time = time.clock() model.most_similar([rand_vec], topn=5, indexer=annoy_index) total_time += time.clock() - start_time return total_time / queries queries = 10000 gensim_time = avg_query_time(queries=queries) annoy_time = avg_query_time(annoy_index, queries=queries) nmslib_time = avg_query_time(nmslib_index, queries=queries) print("Gensim (s/query):\t{0:.5f}".format(gensim_time)) print("Annoy (s/query):\t{0:.5f}".format(annoy_time)) print("Nmslib (s/query):\t{0:.5f}".format(nmslib_time)) speed_improvement_gensim = gensim_time / nmslib_time speed_improvement_annoy = annoy_time / nmslib_time print ("\nNmslib is {0:.2f} times faster on average on this particular run".format(speed_improvement_gensim)) print ("\nNmslib is {0:.2f} times faster on average than annoy on this particular run".format(speed_improvement_annoy))
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
ffb87a38e5bca6024353d35229e0d193
3. Construct Nmslib Index with model & make a similarity query Creating an indexer An instance of NmslibIndexer needs to be created in order to use Nmslib in gensim. The NmslibIndexer class is located in gensim.similarities.nmslib NmslibIndexer() takes three parameters: model: A Word2Vec or Doc2Vec model index_params: Parameters for building the nmslib indexer. index_params affects the build time and the index size. An example is {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100}. Increasing the values of M and efConstruction improves the accuracy of search. However, this also leads to longer indexing times. indexThreadQty is the number of indexing threads. query_time_params: Parameters for querying on the nmslib indexer. query_time_params affects the query time and the search accuracy. An example is {'efSearch': 100}. A larger efSearch gives more accurate results, but a longer query time. More information can be found here. The relationship between parameters, build/query time, and accuracy will be investigated later in the tutorial. Now that we are ready to make a query, let's find the top 5 most similar words to "science" in the Text8 corpus. To make a similarity query we call Word2Vec.most_similar like we would traditionally, but with an added parameter, indexer. The only supported indexers in gensim as of now are Annoy and Nmslib.
# Building nmslib indexer
nmslib_index = NmslibIndexer(model, {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100}, {'efSearch': 10})

# Derive the vector for the word "science" in our model
vector = model["science"]

# The instance of NmslibIndexer we just created is passed
approximate_neighbors = model.most_similar([vector], topn=11, indexer=nmslib_index)

# Neatly print the approximate_neighbors and their corresponding cosine similarity values
print("Approximate Neighbors")
for neighbor in approximate_neighbors:
    print(neighbor)

normal_neighbors = model.most_similar([vector], topn=11)
print("\nNormal (not nmslib-indexed) Neighbors")
for neighbor in normal_neighbors:
    print(neighbor)
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
1c70c2757f4b48c6deaeca166e3b8e79
Analyzing the results The closer the cosine similarity of a vector is to 1, the more similar that word is to our query, which was the vector for "science". In this case the results are almost the same. 4. Verify & Evaluate performance Persisting Indexes You can save and load your indexes from/to disk to prevent having to construct them each time. This will create two files on disk, fname and fname.d. Both files are needed to correctly restore all attributes.
import os fname = '/tmp/mymodel.index' # Persist index to disk nmslib_index.save(fname) # Load index back if os.path.exists(fname): nmslib_index2 = NmslibIndexer.load(fname) nmslib_index2.model = model # Results should be identical to above vector = model["science"] approximate_neighbors2 = model.most_similar([vector], topn=11, indexer=nmslib_index2) for neighbor in approximate_neighbors2: print(neighbor) assert approximate_neighbors == approximate_neighbors2
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
59acc77292e77b772844fc725869a50b
Be sure to use the same model at load time that was used originally, otherwise you will get unexpected behavior. Save memory by memory-mapping indices saved to disk The Nmslib library has a useful feature: indices can be memory-mapped from disk. This saves memory when the same index is used by several processes. Below are two snippets of code. The first one has a separate index for each process. The second snippet shares the index between two processes via memory-mapping. The second example uses less total RAM because the index is shared.
# Remove verbosity from code below (if logging active) if LOGS: logging.disable(logging.CRITICAL) from multiprocessing import Process import psutil
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
5970207a8929390e737543510397c346
Bad Example: Two processes load the Word2vec model from disk and create their own Nmslib indices from that model.
%%time model.save('/tmp/mymodel.pkl') def f(process_id): print('Process Id: {}'.format(os.getpid())) process = psutil.Process(os.getpid()) new_model = Word2Vec.load('/tmp/mymodel.pkl') vector = new_model["science"] nmslib_index = NmslibIndexer(new_model, {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100}, {'efSearch': 10}) approximate_neighbors = new_model.most_similar([vector], topn=5, indexer=nmslib_index) print('\nMemory used by process {}: {}\n---'.format(os.getpid(), process.memory_info())) # Creating and running two parallel process to share the same index file. p1 = Process(target=f, args=('1',)) p1.start() p1.join() p2 = Process(target=f, args=('2',)) p2.start() p2.join()
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
eede62242fedfce7d494336fe357cc48
Good Example: Two processes load both the Word2vec model and the index from disk and memory-map the index.
%%time model.save('/tmp/mymodel.pkl') def f(process_id): print('Process Id: {}'.format(os.getpid())) process = psutil.Process(os.getpid()) new_model = Word2Vec.load('/tmp/mymodel.pkl') vector = new_model["science"] nmslib_index = NmslibIndexer.load('/tmp/mymodel.index') nmslib_index.model = new_model approximate_neighbors = new_model.most_similar([vector], topn=5, indexer=nmslib_index) print('\nMemory used by process {}: {}\n---'.format(os.getpid(), process.memory_info())) # Creating and running two parallel process to share the same index file. p1 = Process(target=f, args=('1',)) p1.start() p1.join() p2 = Process(target=f, args=('2',)) p2.start() p2.join()
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
b8954e443e1cfd057f9b22afc3b7b195
5. Evaluate relationship of parameters to initialization/query time and accuracy, compared with annoy
import matplotlib.pyplot as plt %matplotlib inline
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
2ddc1d6bd950beccdc8fee2dbeca5d61
Build dataset of Initialization times and accuracy measures
exact_results = [element[0] for element in model.most_similar([model.wv.syn0norm[0]], topn=100)] # For calculating query time queries = 1000 def create_evaluation_graph(x_values, y_values_init, y_values_accuracy, y_values_query, param_name): plt.figure(1, figsize=(12, 6)) plt.subplot(231) plt.plot(x_values, y_values_init) plt.title("{} vs initalization time".format(param_name)) plt.ylabel("Initialization time (s)") plt.xlabel(param_name) plt.subplot(232) plt.plot(x_values, y_values_accuracy) plt.title("{} vs accuracy".format(param_name)) plt.ylabel("% accuracy") plt.xlabel(param_name) plt.tight_layout() plt.subplot(233) plt.plot(y_values_init, y_values_accuracy) plt.title("Initialization time vs accuracy") plt.ylabel("% accuracy") plt.xlabel("Initialization time (s)") plt.tight_layout() plt.subplot(234) plt.plot(x_values, y_values_query) plt.title("{} vs query time".format(param_name)) plt.ylabel("query time") plt.xlabel(param_name) plt.tight_layout() plt.subplot(235) plt.plot(y_values_query, y_values_accuracy) plt.title("query time vs accuracy") plt.ylabel("% accuracy") plt.xlabel("query time (s)") plt.tight_layout() plt.show() def evaluate_nmslib_performance(parameter, is_parameter_query, parameter_start, parameter_end, parameter_step): nmslib_x_values = [] nmslib_y_values_init = [] nmslib_y_values_accuracy = [] nmslib_y_values_query = [] index_params = {'M': 100, 'indexThreadQty': 10, 'efConstruction': 100, 'post': 0} query_params = {'efSearch': 100} for x in range(parameter_start, parameter_end, parameter_step): nmslib_x_values.append(x) start_time = time.time() if is_parameter_query: query_params[parameter] = x else: index_params[parameter] = x nmslib_index = NmslibIndexer(model , index_params , query_params) nmslib_y_values_init.append(time.time() - start_time) approximate_results = model.most_similar([model.wv.syn0norm[0]], topn=100, indexer=nmslib_index) top_words = [result[0] for result in approximate_results] nmslib_y_values_accuracy.append(len(set(top_words).intersection(exact_results))) nmslib_y_values_query.append(avg_query_time(nmslib_index, queries=queries)) create_evaluation_graph(nmslib_x_values, nmslib_y_values_init, nmslib_y_values_accuracy, nmslib_y_values_query, parameter) # Evaluate nmslib indexer, changing the parameter M evaluate_nmslib_performance("M", False, 50, 401, 50) # Evaluate nmslib indexer, changing the parameter efConstruction evaluate_nmslib_performance("efConstruction", False, 50, 1001, 100) # Evaluate nmslib indexer, changing the parameter efSearch evaluate_nmslib_performance("efSearch", True, 50, 401, 100) # Evaluate annoy indexer, changing the parameter num_tree annoy_x_values = [] annoy_y_values_init = [] annoy_y_values_accuracy = [] annoy_y_values_query = [] for x in range(100, 401, 50): annoy_x_values.append(x) start_time = time.time() annoy_index = AnnoyIndexer(model, x) annoy_y_values_init.append(time.time() - start_time) approximate_results = model.most_similar([model.wv.syn0norm[0]], topn=100, indexer=annoy_index) top_words = [result[0] for result in approximate_results] annoy_y_values_accuracy.append(len(set(top_words).intersection(exact_results))) annoy_y_values_query.append(avg_query_time(annoy_index, queries=queries)) create_evaluation_graph(annoy_x_values, annoy_y_values_init, annoy_y_values_accuracy, annoy_y_values_query, "num_tree") # nmslib indexer changing the parameter M, efConstruction, efSearch nmslib_y_values_init = [] nmslib_y_values_accuracy = [] nmslib_y_values_query = [] for M in [100, 200]: for efConstruction in [100, 200]: 
for efSearch in [100, 200]: start_time = time.time() nmslib_index = NmslibIndexer(model, {'M': M, 'indexThreadQty': 10, 'efConstruction': efConstruction, 'post': 0}, {'efSearch': efSearch}) nmslib_y_values_init.append(time.time() - start_time) approximate_results = model.most_similar([model.wv.syn0norm[0]], topn=100, indexer=nmslib_index) top_words = [result[0] for result in approximate_results] nmslib_y_values_accuracy.append(len(set(top_words).intersection(exact_results))) nmslib_y_values_query.append(avg_query_time(nmslib_index, queries=queries)) # Make a comparison between annoy and nmslib indexer plt.figure(1, figsize=(12, 6)) plt.subplot(121) plt.scatter(nmslib_y_values_init, nmslib_y_values_accuracy, label="nmslib", color='r', marker='o') plt.scatter(annoy_y_values_init, annoy_y_values_accuracy, label="annoy", color='b', marker='x') plt.legend() plt.title("Initialization time vs accuracy. Upper left is better.") plt.ylabel("% accuracy") plt.xlabel("Initialization time (s)") plt.subplot(122) plt.scatter(nmslib_y_values_query, nmslib_y_values_accuracy, label="nmslib", color='r', marker='o') plt.scatter(annoy_y_values_query, annoy_y_values_accuracy, label="annoy", color='b', marker='x') plt.legend() plt.title("Query time vs accuracy. Upper left is better.") plt.ylabel("% accuracy") plt.xlabel("Query time (s)") plt.xlim(min(nmslib_y_values_query+annoy_y_values_query), max(nmslib_y_values_query+annoy_y_values_query)) plt.tight_layout() plt.show()
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
d6af813852dbd41dade37d108676e066
6. Work with Google word2vec files Our model can be exported to a word2vec C format. There is a binary and a plain text word2vec format. Both can be read with a variety of other software, or imported back into gensim as a KeyedVectors object.
# To export our model as text model.wv.save_word2vec_format('/tmp/vectors.txt', binary=False) from smart_open import open # View the first 3 lines of the exported file # The first line has the total number of entries and the vector dimension count. # The next lines have a key (a string) followed by its vector. with open('/tmp/vectors.txt') as myfile: for i in range(3): print(myfile.readline().strip()) # To import a word2vec text model wv = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # To export our model as binary model.wv.save_word2vec_format('/tmp/vectors.bin', binary=True) # To import a word2vec binary model wv = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # To create and save Nmslib Index from a loaded `KeyedVectors` object nmslib_index = NmslibIndexer(wv, {'M': 100, 'indexThreadQty': 1, 'efConstruction': 100}, {'efSearch': 100}) nmslib_index.save('/tmp/mymodel.index') # Load and test the saved word vectors and saved nmslib index wv = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) nmslib_index = NmslibIndexer.load('/tmp/mymodel.index') nmslib_index.model = wv vector = wv["cat"] approximate_neighbors = wv.most_similar([vector], topn=11, indexer=nmslib_index) # Neatly print the approximate_neighbors and their corresponding cosine similarity values print("Approximate Neighbors") for neighbor in approximate_neighbors: print(neighbor) normal_neighbors = wv.most_similar([vector], topn=11) print("\nNormal (not Nmslib-indexed) Neighbors") for neighbor in normal_neighbors: print(neighbor)
docs/notebooks/nmslibtutorial.ipynb
RaRe-Technologies/gensim
lgpl-2.1
5fcb09a02cb361e10b03cd87aceea978