danf0 commited on
Commit
85ed75d
1 Parent(s): d225144

Update the examples.

Browse files
Files changed (1) hide show
  1. README.md +9 -9
README.md CHANGED
@@ -44,7 +44,7 @@ To calculate the score, pass a list of samples and a similarity function or a st
44
  "See Spot run.",
45
  "Run, Spot, run.",
46
  "Jane sees Spot run."]
47
- >>> results = vendiscore.compute(samples, k="ngram_overlap", ns=[1, 2])
48
  >>> print(results)
49
  {'VS': 3.90657...}
50
  ```
@@ -91,7 +91,7 @@ Given n samples, the value of the Vendi Score ranges between 1 and n, with highe
91
  >>> vendiscore = evaluate.load("danf0/vendiscore")
92
  >>> samples = [0, 0, 10, 10, 20, 20]
93
  >>> k = lambda a, b: np.exp(-np.abs(a - b))
94
- >>> vendiscore.compute(samples, k)
95
  2.9999
96
  ```
97
 
@@ -100,7 +100,7 @@ If you already have precomputed a similarity matrix:
100
  >>> K = np.array([[1.0, 0.9, 0.0],
101
  [0.9, 1.0, 0.0],
102
  [0.0, 0.0, 1.0]])
103
- >>> vendiscore.compute(K, score_K=True)
104
  2.1573
105
  ```
106
 
@@ -110,7 +110,7 @@ to compute the Vendi Score using the covariance matrix, `X @ X.T`.
110
  (If the rows of `X` are not normalized, set `normalize = True`.)
111
  ```
112
  >>> X = np.array([[100, 0], [99, 1], [1, 99], [0, 100]])
113
- >>> vendiscore.compute(X, score_dual=True, normalize=True)
114
  1.9989...
115
  ```
116
 
@@ -120,8 +120,8 @@ The default embeddings are from the pool-2048 layer of the torchvision version o
120
  >>> from torchvision import datasets
121
  >>> mnist = datasets.MNIST("data/mnist", train=False, download=True)
122
  >>> digits = [[x for x, y in mnist if y == c] for c in range(10)]
123
- >>> pixel_vs = [vendiscore.compute(imgs, k="pixels") for imgs in digits]
124
- >>> inception_vs = [vendiscore.compute(imgs, k="image_embeddings", batch_size=64, device="cuda") for imgs in digits]
125
  >>> for y, (pvs, ivs) in enumerate(zip(pixel_vs, inception_vs)): print(f"{y}\t{pvs:.02f}\t{ivs:.02f}")
126
  0 7.68 3.45
127
  1 5.31 3.50
@@ -142,9 +142,9 @@ Text similarity can be calculated using n-gram overlap or using inner products b
142
  "See Spot run.",
143
  "Run, Spot, run.",
144
  "Jane sees Spot run."]
145
- >>> ngram_vs = vendiscore.compute(sents, k="ngram_overlap", ns=[1, 2])
146
- >>> bert_vs = vendiscore.compute(sents, k="text_embeddings", model_path="bert-base-uncased")
147
- >>> simcse_vs = vendiscore.compute(sents, k="text_embeddings", model_path="princeton-nlp/unsup-simcse-bert-base-uncased")
148
  >>> print(f"N-grams: {ngram_vs:.02f}, BERT: {bert_vs:.02f}, SimCSE: {simcse_vs:.02f}")
149
  N-grams: 3.91, BERT: 1.21, SimCSE: 2.81
150
  ```
 
44
  "See Spot run.",
45
  "Run, Spot, run.",
46
  "Jane sees Spot run."]
47
+ >>> results = vendiscore.compute(samples=samples, k="ngram_overlap", ns=[1, 2])
48
  >>> print(results)
49
  {'VS': 3.90657...}
50
  ```
 
91
  >>> vendiscore = evaluate.load("danf0/vendiscore")
92
  >>> samples = [0, 0, 10, 10, 20, 20]
93
  >>> k = lambda a, b: np.exp(-np.abs(a - b))
94
+ >>> vendiscore.compute(samples=samples, k=k)
95
  2.9999
96
  ```
97
 
 
100
  >>> K = np.array([[1.0, 0.9, 0.0],
101
  [0.9, 1.0, 0.0],
102
  [0.0, 0.0, 1.0]])
103
+ >>> vendiscore.compute(samples=K, score_K=True)
104
  2.1573
105
  ```
106
 
 
110
  (If the rows of `X` are not normalized, set `normalize = True`.)
111
  ```
112
  >>> X = np.array([[100, 0], [99, 1], [1, 99], [0, 100]])
113
+ >>> vendiscore.compute(samples=X, score_dual=True, normalize=True)
114
  1.9989...
115
  ```
116
 
 
120
  >>> from torchvision import datasets
121
  >>> mnist = datasets.MNIST("data/mnist", train=False, download=True)
122
  >>> digits = [[x for x, y in mnist if y == c] for c in range(10)]
123
+ >>> pixel_vs = [vendiscore.compute(samples=imgs, k="pixels") for imgs in digits]
124
+ >>> inception_vs = [vendiscore.compute(samples=imgs, k="image_embeddings", batch_size=64, device="cuda") for imgs in digits]
125
  >>> for y, (pvs, ivs) in enumerate(zip(pixel_vs, inception_vs)): print(f"{y}\t{pvs:.02f}\t{ivs:.02f}")
126
  0 7.68 3.45
127
  1 5.31 3.50
 
142
  "See Spot run.",
143
  "Run, Spot, run.",
144
  "Jane sees Spot run."]
145
+ >>> ngram_vs = vendiscore.compute(samples=sents, k="ngram_overlap", ns=[1, 2])
146
+ >>> bert_vs = vendiscore.compute(samples=sents, k="text_embeddings", model_path="bert-base-uncased")
147
+ >>> simcse_vs = vendiscore.compute(samples=sents, k="text_embeddings", model_path="princeton-nlp/unsup-simcse-bert-base-uncased")
148
  >>> print(f"N-grams: {ngram_vs:.02f}, BERT: {bert_vs:.02f}, SimCSE: {simcse_vs:.02f}")
149
  N-grams: 3.91, BERT: 1.21, SimCSE: 2.81
150
  ```