id | content | max_stars_repo_path
---|---|---|
bugs-dot-jar_data_ACCUMULO-2390_28294266 | ---
BugID: ACCUMULO-2390
Summary: TraceProxy.trace should not throw InvocationTargetException
Description: |-
In {{TraceProxy.trace}} there is the following code snippet:
{code}
try {
return method.invoke(instance, args);
} catch (Throwable ex) {
ex.printStackTrace();
throw ex;
}
{code}
When the rethrown exception is an InvocationTargetException, it can really mess with the calling code's exception handling logic.
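A minimal sketch of the unwrapping pattern the patch below applies, assuming a plain {{java.lang.reflect.Proxy}} handler (the class and interface names here are illustrative, not Accumulo's):
{code}
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

public class UnwrappingProxy {
  @SuppressWarnings("unchecked")
  public static <T> T wrap(final T instance, Class<T> iface) {
    InvocationHandler handler = new InvocationHandler() {
      @Override
      public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        try {
          return method.invoke(instance, args);
        } catch (InvocationTargetException ite) {
          // Rethrow what the wrapped method actually threw, so callers' catch
          // blocks (e.g. catch (IOException e)) keep matching as before.
          throw ite.getCause();
        }
      }
    };
    return (T) Proxy.newProxyInstance(iface.getClassLoader(), new Class<?>[] {iface}, handler);
  }
}
{code}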
diff --git a/src/trace/src/main/java/org/apache/accumulo/cloudtrace/instrument/TraceProxy.java b/src/trace/src/main/java/org/apache/accumulo/cloudtrace/instrument/TraceProxy.java
index 67c4463..6b71361 100644
--- a/src/trace/src/main/java/org/apache/accumulo/cloudtrace/instrument/TraceProxy.java
+++ b/src/trace/src/main/java/org/apache/accumulo/cloudtrace/instrument/TraceProxy.java
@@ -17,43 +17,56 @@
package org.apache.accumulo.cloudtrace.instrument;
import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
+import org.apache.log4j.Logger;
+
public class TraceProxy {
- // private static final Logger log = Logger.getLogger(TraceProxy.class);
-
+ private static final Logger log = Logger.getLogger(TraceProxy.class);
+
static final Sampler ALWAYS = new Sampler() {
@Override
public boolean next() {
return true;
}
};
-
+
public static <T> T trace(T instance) {
return trace(instance, ALWAYS);
}
-
+
@SuppressWarnings("unchecked")
public static <T> T trace(final T instance, final Sampler sampler) {
InvocationHandler handler = new InvocationHandler() {
@Override
public Object invoke(Object obj, Method method, Object[] args) throws Throwable {
- if (!sampler.next()) {
- return method.invoke(instance, args);
+ Span span = null;
+ if (sampler.next()) {
+ span = Trace.on(method.getName());
}
- Span span = Trace.on(method.getName());
try {
return method.invoke(instance, args);
- } catch (Throwable ex) {
- ex.printStackTrace();
- throw ex;
+ // Can throw RuntimeException, Error, or any checked exceptions of the method.
+ } catch (InvocationTargetException ite) {
+ Throwable cause = ite.getCause();
+ if (cause == null) {
+ // This should never happen, but account for it anyway
+ log.error("Invocation exception during trace with null cause: ", ite);
+ throw new RuntimeException(ite);
+ }
+ throw cause;
+ } catch (IllegalAccessException e) {
+ throw new RuntimeException(e);
} finally {
- span.stop();
+ if (span != null) {
+ span.stop();
+ }
}
}
};
return (T) Proxy.newProxyInstance(instance.getClass().getClassLoader(), instance.getClass().getInterfaces(), handler);
}
-
+
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2390_28294266.diff |
bugs-dot-jar_data_ACCUMULO-366_db4a291f | ---
BugID: ACCUMULO-366
Summary: master killed a tablet server
Description: |+
Master killed a tablet server for having long hold times.
The tablet server had this error during minor compaction:
{noformat}
01 23:57:20,073 [security.ZKAuthenticator] ERROR: org.apache.zookeeper.KeeperException$NoNodeException: KeeperErrorCode = NoNode for /accumulo/88cd0f63-a36a-4218-86b1-9ba1d2cccf08/users/user004
org.apache.zookeeper.KeeperException$NoNodeException: KeeperErrorCode = NoNode for /accumulo/88cd0f63-a36a-4218-86b1-9ba1d2cccf08/users/user004
at org.apache.zookeeper.KeeperException.create(KeeperException.java:102)
at org.apache.zookeeper.KeeperException.create(KeeperException.java:42)
at org.apache.zookeeper.ZooKeeper.getChildren(ZooKeeper.java:1243)
at org.apache.zookeeper.ZooKeeper.getChildren(ZooKeeper.java:1271)
at org.apache.accumulo.core.zookeeper.ZooUtil.recursiveDelete(ZooUtil.java:103)
at org.apache.accumulo.core.zookeeper.ZooUtil.recursiveDelete(ZooUtil.java:117)
at org.apache.accumulo.server.zookeeper.ZooReaderWriter.recursiveDelete(ZooReaderWriter.java:67)
at sun.reflect.GeneratedMethodAccessor53.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.accumulo.server.zookeeper.ZooReaderWriter$1.invoke(ZooReaderWriter.java:169)
at $Proxy4.recursiveDelete(Unknown Source)
at org.apache.accumulo.server.security.ZKAuthenticator.dropUser(ZKAuthenticator.java:252)
at org.apache.accumulo.server.security.Auditor.dropUser(Auditor.java:104)
at org.apache.accumulo.server.client.ClientServiceHandler.dropUser(ClientServiceHandler.java:136)
at sun.reflect.GeneratedMethodAccessor52.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at cloudtrace.instrument.thrift.TraceWrap$1.invoke(TraceWrap.java:58)
at $Proxy2.dropUser(Unknown Source)
at org.apache.accumulo.core.client.impl.thrift.ClientService$Processor$dropUser.process(ClientService.java:2257)
at org.apache.accumulo.core.tabletserver.thrift.TabletClientService$Processor.process(TabletClientService.java:2037)
at org.apache.accumulo.server.util.TServerUtils$TimedProcessor.process(TServerUtils.java:151)
at org.apache.thrift.server.TNonblockingServer$FrameBuffer.invoke(TNonblockingServer.java:631)
at org.apache.accumulo.server.util.TServerUtils$THsHaServer$Invocation.run(TServerUtils.java:199)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
at org.apache.accumulo.core.util.LoggingRunnable.run(LoggingRunnable.java:34)
at java.lang.Thread.run(Thread.java:662)
{noformat}
This tablet was the result of a split that occurred during a delete. The master missed this tablet when taking tablets offline.
We need to do a consistency check on the offline tablets before deleting the table information in zookeeper.
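The tablet server side of the fix below narrows the exception handling around the flush/compaction id reads so that a missing ZooKeeper node (a deleted table) surfaces as a {{NoNodeException}} the caller can handle. A hedged sketch of that pattern with the plain ZooKeeper client (the path and session setup are assumptions, not Accumulo's actual code):
{code}
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.ZooKeeper;

public class FlushIdReader {
  // Reads a numeric id stored at zPath; propagates NoNodeException so the
  // caller can treat "node was deleted" differently from unexpected failures.
  static long readId(ZooKeeper zk, String zPath) throws NoNodeException {
    try {
      return Long.parseLong(new String(zk.getData(zPath, false, null)));
    } catch (NoNodeException nne) {
      throw nne;                      // expected when the table was deleted
    } catch (KeeperException | InterruptedException | NumberFormatException e) {
      throw new RuntimeException(e);  // anything else is still fatal
    }
  }
}
{code}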
diff --git a/src/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java b/src/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java
index 6931ea8..f5bdd6b 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/zookeeper/ZooCache.java
@@ -136,7 +136,7 @@ public class ZooCache {
}
log.warn("Zookeeper error, will retry", e);
} catch (InterruptedException e) {
- log.warn("Zookeeper error, will retry", e);
+ log.info("Zookeeper error, will retry", e);
} catch (ConcurrentModificationException e) {
log.debug("Zookeeper was modified, will retry");
}
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/ArticleExtractor.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/ArticleExtractor.java
index 54e47b6..06d1670 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/ArticleExtractor.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/ArticleExtractor.java
@@ -16,6 +16,9 @@
*/
package org.apache.accumulo.examples.wikisearch.ingest;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
import java.io.Reader;
import java.text.ParseException;
import java.text.SimpleDateFormat;
@@ -29,6 +32,7 @@ import javax.xml.stream.XMLStreamReader;
import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
import org.apache.accumulo.examples.wikisearch.normalizer.NumberNormalizer;
+import org.apache.hadoop.io.Writable;
public class ArticleExtractor {
@@ -37,13 +41,15 @@ public class ArticleExtractor {
private static NumberNormalizer nn = new NumberNormalizer();
private static LcNoDiacriticsNormalizer lcdn = new LcNoDiacriticsNormalizer();
- public static class Article {
+ public static class Article implements Writable {
int id;
String title;
long timestamp;
String comments;
String text;
+ public Article(){}
+
private Article(int id, String title, long timestamp, String comments, String text) {
super();
this.id = id;
@@ -90,6 +96,24 @@ public class ArticleExtractor {
fields.put("COMMENTS", lcdn.normalizeFieldValue("COMMENTS", this.comments));
return fields;
}
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ id = in.readInt();
+ title = in.readUTF();
+ timestamp = in.readLong();
+ comments = in.readUTF();
+ text = in.readUTF();
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(id);
+ out.writeUTF(title);
+ out.writeLong(timestamp);
+ out.writeUTF(comments);
+ out.writeUTF(text);
+ }
}
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaConfiguration.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaConfiguration.java
index d76d713..5a0aad4 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaConfiguration.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaConfiguration.java
@@ -48,6 +48,11 @@ public class WikipediaConfiguration {
public final static String NUM_GROUPS = "wikipedia.ingest.groups";
+ public final static String PARTITIONED_ARTICLES_DIRECTORY = "wikipedia.partitioned.directory";
+
+ public final static String RUN_PARTITIONER = "wikipedia.run.partitioner";
+ public final static String RUN_INGEST = "wikipedia.run.ingest";
+
public static String getUser(Configuration conf) {
return conf.get(USER);
@@ -117,6 +122,18 @@ public class WikipediaConfiguration {
return conf.getInt(NUM_GROUPS, 1);
}
+ public static Path getPartitionedArticlesPath(Configuration conf) {
+ return new Path(conf.get(PARTITIONED_ARTICLES_DIRECTORY));
+ }
+
+ public static boolean runPartitioner(Configuration conf) {
+ return conf.getBoolean(RUN_PARTITIONER, false);
+ }
+
+ public static boolean runIngest(Configuration conf) {
+ return conf.getBoolean(RUN_INGEST, true);
+ }
+
/**
* Helper method to get properties from Hadoop configuration
*
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaInputFormat.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaInputFormat.java
index e8b8b52..dd2eeb9 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaInputFormat.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaInputFormat.java
@@ -75,10 +75,14 @@ public class WikipediaInputFormat extends TextInputFormat {
Path file = new Path(in.readUTF());
long start = in.readLong();
long length = in.readLong();
- int numHosts = in.readInt();
- String[] hosts = new String[numHosts];
- for(int i = 0; i < numHosts; i++)
- hosts[i] = in.readUTF();
+ String [] hosts = null;
+ if(in.readBoolean())
+ {
+ int numHosts = in.readInt();
+ hosts = new String[numHosts];
+ for(int i = 0; i < numHosts; i++)
+ hosts[i] = in.readUTF();
+ }
fileSplit = new FileSplit(file, start, length, hosts);
partition = in.readInt();
}
@@ -89,10 +93,17 @@ public class WikipediaInputFormat extends TextInputFormat {
out.writeLong(fileSplit.getStart());
out.writeLong(fileSplit.getLength());
String [] hosts = fileSplit.getLocations();
- out.writeInt(hosts.length);
- for(String host:hosts)
+ if(hosts == null)
+ {
+ out.writeBoolean(false);
+ }
+ else
+ {
+ out.writeBoolean(true);
+ out.writeInt(hosts.length);
+ for(String host:hosts)
out.writeUTF(host);
- fileSplit.write(out);
+ }
out.writeInt(partition);
}
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedIngester.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedIngester.java
new file mode 100644
index 0000000..e7493dc
--- /dev/null
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedIngester.java
@@ -0,0 +1,247 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.examples.wikisearch.ingest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.IteratorSetting.Column;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.iterators.user.SummingCombiner;
+import org.apache.accumulo.examples.wikisearch.ingest.ArticleExtractor.Article;
+import org.apache.accumulo.examples.wikisearch.iterator.GlobalIndexUidCombiner;
+import org.apache.accumulo.examples.wikisearch.iterator.TextIndexCombiner;
+import org.apache.accumulo.examples.wikisearch.reader.AggregatingRecordReader;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+public class WikipediaPartitionedIngester extends Configured implements Tool {
+
+ public final static String INGEST_LANGUAGE = "wikipedia.ingest_language";
+ public final static String SPLIT_FILE = "wikipedia.split_file";
+ public final static String TABLE_NAME = "wikipedia.table";
+
+ public static void main(String[] args) throws Exception {
+ int res = ToolRunner.run(new Configuration(), new WikipediaPartitionedIngester(), args);
+ System.exit(res);
+ }
+
+ private void createTables(TableOperations tops, String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException,
+ TableExistsException {
+ // Create the shard table
+ String indexTableName = tableName + "Index";
+ String reverseIndexTableName = tableName + "ReverseIndex";
+ String metadataTableName = tableName + "Metadata";
+
+ // create the shard table
+ if (!tops.exists(tableName)) {
+ // Set a text index combiner on the given field names. No combiner is set if the option is not supplied
+ String textIndexFamilies = WikipediaMapper.TOKENS_FIELD_NAME;
+
+ tops.create(tableName);
+ if (textIndexFamilies.length() > 0) {
+ System.out.println("Adding content combiner on the fields: " + textIndexFamilies);
+
+ IteratorSetting setting = new IteratorSetting(10, TextIndexCombiner.class);
+ List<Column> columns = new ArrayList<Column>();
+ for (String family : StringUtils.split(textIndexFamilies, ',')) {
+ columns.add(new Column("fi\0" + family));
+ }
+ TextIndexCombiner.setColumns(setting, columns);
+ TextIndexCombiner.setLossyness(setting, true);
+
+ tops.attachIterator(tableName, setting, EnumSet.allOf(IteratorScope.class));
+ }
+
+ // Set the locality group for the full content column family
+ tops.setLocalityGroups(tableName, Collections.singletonMap("WikipediaDocuments", Collections.singleton(new Text(WikipediaMapper.DOCUMENT_COLUMN_FAMILY))));
+
+ }
+
+ if (!tops.exists(indexTableName)) {
+ tops.create(indexTableName);
+ // Add the UID combiner
+ IteratorSetting setting = new IteratorSetting(19, "UIDAggregator", GlobalIndexUidCombiner.class);
+ GlobalIndexUidCombiner.setCombineAllColumns(setting, true);
+ GlobalIndexUidCombiner.setLossyness(setting, true);
+ tops.attachIterator(indexTableName, setting, EnumSet.allOf(IteratorScope.class));
+ }
+
+ if (!tops.exists(reverseIndexTableName)) {
+ tops.create(reverseIndexTableName);
+ // Add the UID combiner
+ IteratorSetting setting = new IteratorSetting(19, "UIDAggregator", GlobalIndexUidCombiner.class);
+ GlobalIndexUidCombiner.setCombineAllColumns(setting, true);
+ GlobalIndexUidCombiner.setLossyness(setting, true);
+ tops.attachIterator(reverseIndexTableName, setting, EnumSet.allOf(IteratorScope.class));
+ }
+
+ if (!tops.exists(metadataTableName)) {
+ // Add the SummingCombiner with VARLEN encoding for the frequency column
+ tops.create(metadataTableName);
+ IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class);
+ SummingCombiner.setColumns(setting, Collections.singletonList(new Column("f")));
+ SummingCombiner.setEncodingType(setting, SummingCombiner.Type.VARLEN);
+ tops.attachIterator(metadataTableName, setting, EnumSet.allOf(IteratorScope.class));
+ }
+ }
+
+ @Override
+ public int run(String[] args) throws Exception {
+ Configuration conf = getConf();
+ if(WikipediaConfiguration.runPartitioner(conf))
+ {
+ int result = runPartitionerJob();
+ if(result != 0)
+ return result;
+ }
+ if(WikipediaConfiguration.runIngest(conf))
+ return runIngestJob();
+ return 0;
+ }
+
+ public int runPartitionerJob() throws Exception
+ {
+ Job partitionerJob = new Job(getConf(), "Partition Wikipedia");
+ Configuration partitionerConf = partitionerJob.getConfiguration();
+ partitionerConf.set("mapred.map.tasks.speculative.execution", "false");
+
+ configurePartitionerJob(partitionerJob);
+
+ List<Path> inputPaths = new ArrayList<Path>();
+ SortedSet<String> languages = new TreeSet<String>();
+ FileSystem fs = FileSystem.get(partitionerConf);
+ Path parent = new Path(partitionerConf.get("wikipedia.input"));
+ listFiles(parent, fs, inputPaths, languages);
+
+ System.out.println("Input files in " + parent + ":" + inputPaths.size());
+ Path[] inputPathsArray = new Path[inputPaths.size()];
+ inputPaths.toArray(inputPathsArray);
+
+ System.out.println("Languages:" + languages.size());
+
+ // setup input format
+
+ WikipediaInputFormat.setInputPaths(partitionerJob, inputPathsArray);
+
+ partitionerJob.setMapperClass(WikipediaPartitioner.class);
+ partitionerJob.setNumReduceTasks(0);
+
+ // setup output format
+ partitionerJob.setMapOutputKeyClass(Text.class);
+ partitionerJob.setMapOutputValueClass(Article.class);
+ partitionerJob.setOutputFormatClass(SequenceFileOutputFormat.class);
+ Path outputDir = WikipediaConfiguration.getPartitionedArticlesPath(partitionerConf);
+ SequenceFileOutputFormat.setOutputPath(partitionerJob, outputDir);
+
+ return partitionerJob.waitForCompletion(true) ? 0 : 1;
+ }
+
+ public int runIngestJob() throws Exception
+ {
+ Job ingestJob = new Job(getConf(), "Ingest Partitioned Wikipedia");
+ Configuration ingestConf = ingestJob.getConfiguration();
+ ingestConf.set("mapred.map.tasks.speculative.execution", "false");
+
+ String tablename = WikipediaConfiguration.getTableName(ingestConf);
+
+ String zookeepers = WikipediaConfiguration.getZookeepers(ingestConf);
+ String instanceName = WikipediaConfiguration.getInstanceName(ingestConf);
+
+ String user = WikipediaConfiguration.getUser(ingestConf);
+ byte[] password = WikipediaConfiguration.getPassword(ingestConf);
+ Connector connector = WikipediaConfiguration.getConnector(ingestConf);
+
+ TableOperations tops = connector.tableOperations();
+
+ createTables(tops, tablename);
+
+ // setup input format
+ ingestJob.setInputFormatClass(SequenceFileInputFormat.class);
+ SequenceFileInputFormat.setInputPaths(ingestJob, WikipediaConfiguration.getPartitionedArticlesPath(ingestConf));
+
+ // setup output format
+ ingestJob.setMapOutputKeyClass(Text.class);
+ ingestJob.setMapOutputValueClass(Mutation.class);
+ ingestJob.setOutputFormatClass(AccumuloOutputFormat.class);
+ AccumuloOutputFormat.setOutputInfo(ingestJob.getConfiguration(), user, password, true, tablename);
+ AccumuloOutputFormat.setZooKeeperInstance(ingestJob.getConfiguration(), instanceName, zookeepers);
+
+ return ingestJob.waitForCompletion(true) ? 0 : 1;
+ }
+
+ public final static PathFilter partFilter = new PathFilter() {
+ @Override
+ public boolean accept(Path path) {
+ return path.getName().startsWith("part");
+ };
+ };
+
+ protected void configurePartitionerJob(Job job) {
+ Configuration conf = job.getConfiguration();
+ job.setJarByClass(WikipediaPartitionedIngester.class);
+ job.setInputFormatClass(WikipediaInputFormat.class);
+ conf.set(AggregatingRecordReader.START_TOKEN, "<page>");
+ conf.set(AggregatingRecordReader.END_TOKEN, "</page>");
+ }
+
+ protected static final Pattern filePattern = Pattern.compile("([a-z_]+).*.xml(.bz2)?");
+
+ protected void listFiles(Path path, FileSystem fs, List<Path> files, Set<String> languages) throws IOException {
+ for (FileStatus status : fs.listStatus(path)) {
+ if (status.isDir()) {
+ listFiles(status.getPath(), fs, files, languages);
+ } else {
+ Path p = status.getPath();
+ Matcher matcher = filePattern.matcher(p.getName());
+ if (matcher.matches()) {
+ languages.add(matcher.group(1));
+ files.add(p);
+ }
+ }
+ }
+ }
+}
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
new file mode 100644
index 0000000..4d94c24
--- /dev/null
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *
+ */
+package org.apache.accumulo.examples.wikisearch.ingest;
+
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.nio.charset.Charset;
+import java.util.HashSet;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.wikisearch.ingest.ArticleExtractor.Article;
+import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
+import org.apache.accumulo.examples.wikisearch.protobuf.Uid;
+import org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.Builder;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.log4j.Logger;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.wikipedia.analysis.WikipediaTokenizer;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+
+public class WikipediaPartitionedMapper extends Mapper<Text,Article,Text,Mutation> {
+
+ private static final Logger log = Logger.getLogger(WikipediaPartitionedMapper.class);
+
+ public final static Charset UTF8 = Charset.forName("UTF-8");
+ public static final String DOCUMENT_COLUMN_FAMILY = "d";
+ public static final String METADATA_EVENT_COLUMN_FAMILY = "e";
+ public static final String METADATA_INDEX_COLUMN_FAMILY = "i";
+ public static final String TOKENS_FIELD_NAME = "TEXT";
+
+ private static final Value NULL_VALUE = new Value(new byte[0]);
+ private static final String cvPrefix = "all|";
+
+ private int numPartitions = 0;
+
+ private Text tablename = null;
+ private Text indexTableName = null;
+ private Text reverseIndexTableName = null;
+ private Text metadataTableName = null;
+
+ @Override
+ public void setup(Context context) {
+ Configuration conf = context.getConfiguration();
+ tablename = new Text(WikipediaConfiguration.getTableName(conf));
+ indexTableName = new Text(tablename + "Index");
+ reverseIndexTableName = new Text(tablename + "ReverseIndex");
+ metadataTableName = new Text(tablename + "Metadata");
+
+ numPartitions = WikipediaConfiguration.getNumPartitions(conf);
+ }
+
+ @Override
+ protected void map(Text language, Article article, Context context) throws IOException, InterruptedException {
+ String NULL_BYTE = "\u0000";
+ String colfPrefix = language.toString() + NULL_BYTE;
+ String indexPrefix = "fi" + NULL_BYTE;
+ ColumnVisibility cv = new ColumnVisibility(cvPrefix + language);
+
+ if (article != null) {
+ Text partitionId = new Text(Integer.toString(WikipediaMapper.getPartitionId(article, numPartitions)));
+
+ // Create the mutations for the document.
+ // Row is partition id, colf is language0articleid, colq is fieldName\0fieldValue
+ Mutation m = new Mutation(partitionId);
+ for (Entry<String,Object> entry : article.getFieldValues().entrySet()) {
+ m.put(colfPrefix + article.getId(), entry.getKey() + NULL_BYTE + entry.getValue().toString(), cv, article.getTimestamp(), NULL_VALUE);
+ // Create mutations for the metadata table.
+ Mutation mm = new Mutation(entry.getKey());
+ mm.put(METADATA_EVENT_COLUMN_FAMILY, language.toString(), cv, article.getTimestamp(), NULL_VALUE);
+ context.write(metadataTableName, mm);
+ }
+
+ // Tokenize the content
+ Set<String> tokens = getTokens(article);
+
+ // We are going to put the fields to be indexed into a multimap. This allows us to iterate
+ // over the entire set once.
+ Multimap<String,String> indexFields = HashMultimap.create();
+ // Add the normalized field values
+ LcNoDiacriticsNormalizer normalizer = new LcNoDiacriticsNormalizer();
+ for (Entry<String,String> index : article.getNormalizedFieldValues().entrySet())
+ indexFields.put(index.getKey(), index.getValue());
+ // Add the tokens
+ for (String token : tokens)
+ indexFields.put(TOKENS_FIELD_NAME, normalizer.normalizeFieldValue("", token));
+
+ for (Entry<String,String> index : indexFields.entries()) {
+ // Create mutations for the in partition index
+ // Row is partition id, colf is 'fi'\0fieldName, colq is fieldValue\0language\0article id
+ m.put(indexPrefix + index.getKey(), index.getValue() + NULL_BYTE + colfPrefix + article.getId(), cv, article.getTimestamp(), NULL_VALUE);
+
+ // Create mutations for the global index
+ // Create a UID object for the Value
+ Builder uidBuilder = Uid.List.newBuilder();
+ uidBuilder.setIGNORE(false);
+ uidBuilder.setCOUNT(1);
+ uidBuilder.addUID(Integer.toString(article.getId()));
+ Uid.List uidList = uidBuilder.build();
+ Value val = new Value(uidList.toByteArray());
+
+ // Create mutations for the global index
+ // Row is field value, colf is field name, colq is partitionid\0language, value is Uid.List object
+ Mutation gm = new Mutation(index.getValue());
+ gm.put(index.getKey(), partitionId + NULL_BYTE + language, cv, article.getTimestamp(), val);
+ context.write(indexTableName, gm);
+
+ // Create mutations for the global reverse index
+ Mutation grm = new Mutation(StringUtils.reverse(index.getValue()));
+ grm.put(index.getKey(), partitionId + NULL_BYTE + language, cv, article.getTimestamp(), val);
+ context.write(reverseIndexTableName, grm);
+
+ // Create mutations for the metadata table.
+ Mutation mm = new Mutation(index.getKey());
+ mm.put(METADATA_INDEX_COLUMN_FAMILY, language + NULL_BYTE + LcNoDiacriticsNormalizer.class.getName(), cv, article.getTimestamp(), NULL_VALUE);
+ context.write(metadataTableName, mm);
+
+ }
+ // Add the entire text to the document section of the table.
+ // row is the partition, colf is 'd', colq is language\0articleid, value is Base64 encoded GZIP'd document
+ m.put(DOCUMENT_COLUMN_FAMILY, colfPrefix + article.getId(), cv, article.getTimestamp(), new Value(Base64.encodeBase64(article.getText().getBytes())));
+ context.write(tablename, m);
+
+ } else {
+ context.getCounter("wikipedia", "invalid articles").increment(1);
+ }
+ context.progress();
+ }
+
+ /**
+ * Tokenize the wikipedia content
+ *
+ * @param article
+ * @return
+ * @throws IOException
+ */
+ private Set<String> getTokens(Article article) throws IOException {
+ Set<String> tokenList = new HashSet<String>();
+ WikipediaTokenizer tok = new WikipediaTokenizer(new StringReader(article.getText()));
+ TermAttribute term = tok.addAttribute(TermAttribute.class);
+ try {
+ while (tok.incrementToken()) {
+ String token = term.term();
+ if (!StringUtils.isEmpty(token))
+ tokenList.add(token);
+ }
+ } catch (IOException e) {
+ log.error("Error tokenizing text", e);
+ } finally {
+ try {
+ tok.end();
+ } catch (IOException e) {
+ log.error("Error calling end()", e);
+ } finally {
+ try {
+ tok.close();
+ } catch (IOException e) {
+ log.error("Error closing tokenizer", e);
+ }
+ }
+ }
+ return tokenList;
+ }
+
+}
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
new file mode 100644
index 0000000..82af9fd
--- /dev/null
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *
+ */
+package org.apache.accumulo.examples.wikisearch.ingest;
+
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.StringReader;
+import java.nio.charset.Charset;
+import java.util.HashSet;
+import java.util.IllegalFormatException;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.examples.wikisearch.ingest.ArticleExtractor.Article;
+import org.apache.accumulo.examples.wikisearch.ingest.WikipediaInputFormat.WikipediaInputSplit;
+import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
+import org.apache.accumulo.examples.wikisearch.protobuf.Uid;
+import org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.Builder;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.apache.log4j.Logger;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.wikipedia.analysis.WikipediaTokenizer;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+
+public class WikipediaPartitioner extends Mapper<LongWritable,Text,Text,Article> {
+
+ private static final Logger log = Logger.getLogger(WikipediaPartitioner.class);
+
+ public final static Charset UTF8 = Charset.forName("UTF-8");
+ public static final String DOCUMENT_COLUMN_FAMILY = "d";
+ public static final String METADATA_EVENT_COLUMN_FAMILY = "e";
+ public static final String METADATA_INDEX_COLUMN_FAMILY = "i";
+ public static final String TOKENS_FIELD_NAME = "TEXT";
+
+ private final static Pattern languagePattern = Pattern.compile("([a-z_]+).*.xml(.bz2)?");
+
+ private ArticleExtractor extractor;
+ private String language;
+
+ private int myGroup = -1;
+ private int numGroups = -1;
+
+ @Override
+ public void setup(Context context) {
+ Configuration conf = context.getConfiguration();
+
+ WikipediaInputSplit wiSplit = (WikipediaInputSplit)context.getInputSplit();
+ myGroup = wiSplit.getPartition();
+ numGroups = WikipediaConfiguration.getNumGroups(conf);
+
+ FileSplit split = wiSplit.getFileSplit();
+ String fileName = split.getPath().getName();
+ Matcher matcher = languagePattern.matcher(fileName);
+ if (matcher.matches()) {
+ language = matcher.group(1).replace('_', '-').toLowerCase();
+ } else {
+ throw new RuntimeException("Unknown ingest language! " + fileName);
+ }
+ extractor = new ArticleExtractor();
+ }
+
+ @Override
+ protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
+ Article article = extractor.extract(new InputStreamReader(new ByteArrayInputStream(value.getBytes()), UTF8));
+ if (article != null) {
+ int groupId = WikipediaMapper.getPartitionId(article, numGroups);
+ if(groupId != myGroup)
+ return;
+ context.write(new Text(language), article);
+ } else {
+ context.getCounter("wikipedia", "invalid articles").increment(1);
+ context.progress();
+ }
+ }
+
+}
diff --git a/src/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java b/src/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
index 3e719e6..e709704 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
@@ -123,6 +123,8 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
import cloudtrace.instrument.Span;
import cloudtrace.instrument.Trace;
@@ -2274,6 +2276,7 @@ public class Tablet {
if (updateMetadata) {
synchronized (this) {
updatingFlushID = false;
+ this.notifyAll();
}
}
}
@@ -2281,8 +2284,19 @@ public class Tablet {
}
boolean initiateMinorCompaction() {
+ if (isClosed()) {
+ // don't bother trying to get flush id if closed... could be closed after this check but that is ok... just trying to cut down on uneeded log messages....
+ return false;
+ }
+
// get the flush id before the new memmap is made available for write
- long flushId = getFlushID();
+ long flushId;
+ try {
+ flushId = getFlushID();
+ } catch (NoNodeException e) {
+ log.info("Asked to initiate MinC when there was no flush id " + getExtent() + " " + e.getMessage());
+ return false;
+ }
return initiateMinorCompaction(flushId);
}
@@ -2338,23 +2352,39 @@ public class Tablet {
return true;
}
- long getFlushID() {
+ long getFlushID() throws NoNodeException {
try {
String zTablePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + extent.getTableId()
+ Constants.ZTABLE_FLUSH_ID;
return Long.parseLong(new String(ZooReaderWriter.getRetryingInstance().getData(zTablePath, null)));
- } catch (Exception e) {
+ } catch (InterruptedException e) {
throw new RuntimeException(e);
+ } catch (NumberFormatException nfe) {
+ throw new RuntimeException(nfe);
+ } catch (KeeperException ke) {
+ if (ke instanceof NoNodeException) {
+ throw (NoNodeException) ke;
+ } else {
+ throw new RuntimeException(ke);
+ }
}
}
- long getCompactionID() {
+ long getCompactionID() throws NoNodeException {
try {
String zTablePath = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES + "/" + extent.getTableId()
+ Constants.ZTABLE_COMPACT_ID;
return Long.parseLong(new String(ZooReaderWriter.getRetryingInstance().getData(zTablePath, null)));
- } catch (Exception e) {
+ } catch (InterruptedException e) {
throw new RuntimeException(e);
+ } catch (NumberFormatException nfe) {
+ throw new RuntimeException(nfe);
+ } catch (KeeperException ke) {
+ if (ke instanceof NoNodeException) {
+ throw (NoNodeException) ke;
+ } else {
+ throw new RuntimeException(ke);
+ }
}
}
@@ -2557,13 +2587,25 @@ public class Tablet {
}
}
+ while (updatingFlushID) {
+ try {
+ this.wait(50);
+ } catch (InterruptedException e) {
+ log.error(e.toString());
+ }
+ }
+
if (!saveState || tabletMemory.getMemTable().getNumEntries() == 0) {
return;
}
tabletMemory.waitForMinC();
- mct = prepareForMinC(getFlushID());
+ try {
+ mct = prepareForMinC(getFlushID());
+ } catch (NoNodeException e) {
+ throw new RuntimeException(e);
+ }
if (queueMinC) {
tabletResources.executeMinorCompaction(mct);
@@ -2612,7 +2654,11 @@ public class Tablet {
tabletMemory.waitForMinC();
if (saveState && tabletMemory.getMemTable().getNumEntries() > 0) {
- prepareForMinC(getFlushID()).run();
+ try {
+ prepareForMinC(getFlushID()).run();
+ } catch (NoNodeException e) {
+ throw new RuntimeException(e);
+ }
}
if (saveState) {
@@ -3103,7 +3149,11 @@ public class Tablet {
Long compactionId = null;
if (!propogateDeletes) {
// compacting everything, so update the compaction id in !METADATA
- compactionId = getCompactionID();
+ try {
+ compactionId = getCompactionID();
+ } catch (NoNodeException e) {
+ throw new RuntimeException(e);
+ }
}
// need to handle case where only one file is being major compacted
diff --git a/src/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/src/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index e01ca07..94e8137 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -194,6 +194,7 @@ import org.apache.thrift.TProcessor;
import org.apache.thrift.TServiceClient;
import org.apache.thrift.server.TServer;
import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
import cloudtrace.instrument.Span;
import cloudtrace.instrument.Trace;
@@ -1887,7 +1888,13 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
if (flushID == null) {
// read the flush id once from zookeeper instead of reading
// it for each tablet
- flushID = tablet.getFlushID();
+ try {
+ flushID = tablet.getFlushID();
+ } catch (NoNodeException e) {
+ // table was probably deleted
+ log.info("Asked to flush table that has no flush id " + ke + " " + e.getMessage());
+ return;
+ }
}
tablet.flush(flushID);
}
@@ -1904,7 +1911,11 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
Tablet tablet = onlineTablets.get(new KeyExtent(textent));
if (tablet != null) {
log.info("Flushing " + tablet.getExtent());
- tablet.flush(tablet.getFlushID());
+ try {
+ tablet.flush(tablet.getFlushID());
+ } catch (NoNodeException nne) {
+ log.info("Asked to flush tablet that has no flush id " + new KeyExtent(textent) + " " + nne.getMessage());
+ }
}
}
@@ -1999,7 +2010,12 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
// all for the same table id, so only need to read
// compaction id once
if (compactionId == null)
- compactionId = tablet.getCompactionID();
+ try {
+ compactionId = tablet.getCompactionID();
+ } catch (NoNodeException e) {
+ log.info("Asked to compact table with no compaction id " + ke + " " + e.getMessage());
+ return;
+ }
tablet.compactAll(compactionId);
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-366_db4a291f.diff |
bugs-dot-jar_data_ACCUMULO-3945_36225565 | ---
BugID: ACCUMULO-3945
Summary: In Accumulo 1.7.0, connecting to a minicluster started via bin/accumulo minicluster
doesn't work
Description: "In Accumulo 1.7.0, connecting to a minicluster started via \"bin/accumulo
minicluster\" doesn't work. When connecting, it appears to ignore the ZK port supplied
in the command and is attempting to listen to ZK on 2181.\n\nFor example:\naccumulo-1.7.0
> bin/accumulo minicluster\n…\nMini Accumulo Cluster\n\n Directory: /var/folders/rv/44k88tps4ql0dc1f68ck4d2w0000gn/T/1437925819514-0\n
\ Logs: /var/folders/rv/44k88tps4ql0dc1f68ck4d2w0000gn/T/1437925819514-0/logs\n
\ Instance Name: miniInstance\n Root Password: secret\n ZooKeeper:
\ localhost:56783\n Shutdown Port: 4445\n\n To connect with shell,
use the following command :\n accumulo shell -zh localhost:56783 -zi miniInstance
-u root\n\nSuccessfully started on Sun Jul 26 11:50:28 EDT 2015\n===================\n\nFrom
a new terminal:\n\naccumulo-1.7.0 > accumulo shell -zh localhost:56783 -zi miniInstance
-u root\nPassword: *******\n…. 60 seconds later ….\n2015-07-26 11:52:44,436 [tracer.ZooTraceClient]
ERROR: Unabled to get destination tracer hosts\nin ZooKeeper, will retry in 5000
milliseconds\njava.lang.RuntimeException: Failed to connect to zookeeper (localhost:2181)
within 2x zookeeper\ntimeout period 30000\n\tat org.apache.accumulo.fate.zookeeper.ZooSession.connect(ZooSession.java:124)\n\nShell
- Apache Accumulo Interactive Shell\n-\n- version: 1.7.0\n- instance name: miniInstance\n-
instance id: a371d4ac-8bc7-4a6a-865f-5f3c8e27fbe1\n-\n- type 'help' for a list of
available commands\n-\nroot@miniInstance>\n\n"
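For comparison, a direct client connection that passes the minicluster's ZooKeeper host:port explicitly avoids the 2181 default; a hedged sketch using the standard 1.7 client API (the instance name, port, and password are just the values printed in this example run):
{code}
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class MiniClusterClient {
  public static void main(String[] args) throws Exception {
    // Instance name and ZooKeeper host:port exactly as printed by "bin/accumulo minicluster".
    Instance instance = new ZooKeeperInstance("miniInstance", "localhost:56783");
    Connector conn = instance.getConnector("root", new PasswordToken("secret"));
    System.out.println(conn.tableOperations().list());
  }
}
{code}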
diff --git a/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java b/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
index 92ea1a5..01b7ce3 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/ShellOptionsJC.java
@@ -315,6 +315,11 @@ public class ShellOptionsJC {
clientConfig.withZkHosts(siteConf.get(Property.INSTANCE_ZK_HOST));
}
+ // If the user provided the hosts, set the ZK for tracing too
+ if (null != zooKeeperHosts) {
+ clientConfig.setProperty(ClientProperty.INSTANCE_ZK_HOST, zooKeeperHosts);
+ }
+
return clientConfig;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3945_36225565.diff |
bugs-dot-jar_data_ACCUMULO-1051_25cf3ccd | ---
BugID: ACCUMULO-1051
Summary: Authorizations has inconsistent serialization
Description: |-
The same set of authorizations may not serialize to the same value each time, if specified in a different order when constructed (like new Authorizations("a", "b") and new Authorizations("b", "a")), because serialization reproducibility depends on the insert order in the underlying HashSet.
So, one could get the following to happen:
{code:java}
true == auths1.equals(auths2) && !auths1.serialize().equals(auths2.serialize());
{code}
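A small illustrative sketch (not the actual patch) of why a canonical ordering makes the serialized form reproducible: sorting the auth strings before joining yields the same output regardless of construction order.
{code}
import java.util.Arrays;
import java.util.TreeSet;

public class AuthOrderSketch {
  // TreeSet imposes a canonical order, so {"a","b"} and {"b","a"} serialize identically.
  static String serialize(String... auths) {
    return String.join(",", new TreeSet<>(Arrays.asList(auths)));
  }

  public static void main(String[] args) {
    System.out.println(serialize("a", "b").equals(serialize("b", "a"))); // true
  }
}
{code}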
diff --git a/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java b/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
index 5933325..a677f3f 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/Authorizations.java
@@ -23,10 +23,9 @@ import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
import org.apache.accumulo.core.data.ArrayByteSequence;
import org.apache.accumulo.core.data.ByteSequence;
@@ -38,14 +37,14 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
private static final long serialVersionUID = 1L;
- private Set<ByteSequence> auths = new TreeSet<ByteSequence>();
+ private HashSet<ByteSequence> auths = new HashSet<ByteSequence>();
private List<byte[]> authsList = new ArrayList<byte[]>();
private List<byte[]> immutableList = Collections.unmodifiableList(authsList);
private static final boolean[] validAuthChars = new boolean[256];
public static final String HEADER = "!AUTH1:";
-
+
static {
for (int i = 0; i < 256; i++) {
validAuthChars[i] = false;
@@ -104,11 +103,11 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
* @param authorizations
* a serialized authorizations string produced by {@link #getAuthorizationsArray()} or {@link #serialize()}
*/
-
+
public Authorizations(byte[] authorizations) {
ArgumentChecker.notNull(authorizations);
-
+
String authsString = new String(authorizations);
if (authsString.startsWith(HEADER)) {
// its the new format
@@ -141,7 +140,7 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
public Authorizations(Charset charset, String... authorizations) {
setAuthorizations(charset, authorizations);
}
-
+
public Authorizations(String... authorizations) {
setAuthorizations(authorizations);
}
@@ -177,7 +176,6 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
return ByteBufferUtil.toByteBuffers(immutableList);
}
- @Override
public String toString() {
StringBuilder sb = new StringBuilder();
String sep = "";
@@ -198,7 +196,6 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
return auths.contains(auth);
}
- @Override
public boolean equals(Object o) {
if (o == null) {
return false;
@@ -213,7 +210,6 @@ public class Authorizations implements Iterable<byte[]>, Serializable {
return false;
}
- @Override
public int hashCode() {
int result = 0;
for (ByteSequence b : auths)
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1051_25cf3ccd.diff |
bugs-dot-jar_data_ACCUMULO-3634_9339ecf8 | ---
BugID: ACCUMULO-3634
Summary: AuthenticationTokenSecretManager might delete key while ZooAuthenticationKeyWatcher
enumerates existing keys
Description: |-
Noticed the following race condition.
The secret manager (in the master) on startup will enumerate the old keys used for creating delegation tokens and delete the keys that are expired.
  At the same time, the watcher (in each tserver) might see some updates to these keys and update the secret manager. There's a race condition in which the watcher might try to read a key that the secret manager just deleted.
  Need to catch the NoNodeException in the watcher and just accept that it's ok if one of these children is deleted, to avoid a scary error in the monitor.
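The general pattern, sketched against the raw ZooKeeper client (path handling and deserialization are placeholders): treat a {{NoNodeException}} on an individual child as a benign race rather than a failure of the whole enumeration.
{code}
import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.ZooKeeper;

public class ChildEnumerationSketch {
  static int loadChildren(ZooKeeper zk, String path) throws KeeperException, InterruptedException {
    int loaded = 0;
    List<String> children = zk.getChildren(path, false);
    for (String child : children) {
      try {
        byte[] data = zk.getData(path + "/" + child, false, null);
        // ... deserialize `data` and hand it to the secret manager ...
        loaded++;
      } catch (NoNodeException e) {
        // Deleted between getChildren() and getData(); skip it quietly.
      }
    }
    return loaded;
  }
}
{code}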
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java
index 2913343..fe4407e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/ZooAuthenticationKeyWatcher.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.accumulo.fate.zookeeper.ZooReader;
import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
@@ -131,10 +132,15 @@ public class ZooAuthenticationKeyWatcher implements Watcher {
int keysAdded = 0;
for (String child : zk.getChildren(path, this)) {
String childPath = path + "/" + child;
- // Get the node data and reset the watcher
- AuthenticationKey key = deserializeKey(zk.getData(childPath, this, null));
- secretManager.addKey(key);
- keysAdded++;
+ try {
+ // Get the node data and reset the watcher
+ AuthenticationKey key = deserializeKey(zk.getData(childPath, this, null));
+ secretManager.addKey(key);
+ keysAdded++;
+ } catch (NoNodeException e) {
+ // The master expired(deleted) the key between when we saw it in getChildren() and when we went to add it to our secret manager.
+ log.trace("{} was deleted when we tried to access it", childPath);
+ }
}
return keysAdded;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3634_9339ecf8.diff |
bugs-dot-jar_data_ACCUMULO-2520_a64151e6 | ---
BugID: ACCUMULO-2520
Summary: Garbage collector deleted everything when given bad input
Description: "Patch v3 of the upgrade from ACCUMULO-2145 had a test that did the following
before upgrade.\n\n{noformat}\nroot@testUp> table !METADATA\nroot@testUp !METADATA>
grant Table.WRITE -u root \nroot@testUp !METADATA> insert ~del testDel test valueTest\n{noformat}\n\nThis
is a malformed delete entry. Accumulo code should not delete such entries. When
the 1.5.1 garbage collector saw this it did the following.\n\n{noformat}\n2014-03-20
18:20:05,359 [gc.SimpleGarbageCollector] DEBUG: Deleting /accumuloTest/tables\n2014-03-20
18:20:05,359 [gc.SimpleGarbageCollector] DEBUG: Deleting /accumuloTest/tables/!0/default_tablet/F0000009.rf\n2014-03-20
18:20:05,360 [gc.SimpleGarbageCollector] DEBUG: Deleting /accumuloTest/tables/!0/table_info/F000000b.rf\n{noformat}\n\nGC
should validate that delete entries are paths of the expected length. I have confirmed
  this bug exists in 1.5.1. I am assuming it exists in the 1.4 and 1.6 branches."
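A hedged, simplified sketch of the defensive check the patch introduces: skip (and only log) any deletion candidate whose path does not have the expected shape, so a short or malformed entry can never resolve to a parent directory. The depth check here is illustrative, not the exact logic of {{GarbageCollectionAlgorithm}}.
{code}
import java.util.ArrayList;
import java.util.List;

public class DeleteCandidateFilter {
  // Require at least <tableId>/<tabletDir>/<file> worth of path components;
  // shorter or malformed candidates are skipped with a warning, never deleted.
  static List<String> validCandidates(List<String> candidates) {
    List<String> valid = new ArrayList<>();
    for (String candidate : candidates) {
      int depth = 0;
      for (String token : candidate.split("/"))
        if (!token.isEmpty())
          depth++;
      if (depth >= 3)
        valid.add(candidate);
      else
        System.err.println("Ignoring invalid deletion candidate " + candidate);
    }
    return valid;
  }
}
{code}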
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
index 464d0d9..40fb847 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
@@ -85,10 +85,7 @@ public class GarbageCollectionAlgorithm {
tokens = tmp.toArray(new String[tmp.size()]);
}
- if (tokens.length > 3) {
- if (!path.contains(":"))
- throw new IllegalArgumentException(path);
-
+ if (tokens.length > 3 && path.contains(":")) {
if (tokens[tokens.length - 4].equals(ServerConstants.TABLE_DIR) && (expectedLen == 0 || expectedLen == 3)) {
relPath = tokens[tokens.length - 3] + "/" + tokens[tokens.length - 2] + "/" + tokens[tokens.length - 1];
} else if (tokens[tokens.length - 3].equals(ServerConstants.TABLE_DIR) && (expectedLen == 0 || expectedLen == 2)) {
@@ -96,9 +93,9 @@ public class GarbageCollectionAlgorithm {
} else {
throw new IllegalArgumentException(path);
}
- } else if (tokens.length == 3 && (expectedLen == 0 || expectedLen == 3)) {
+ } else if (tokens.length == 3 && (expectedLen == 0 || expectedLen == 3) && !path.contains(":")) {
relPath = tokens[0] + "/" + tokens[1] + "/" + tokens[2];
- } else if (tokens.length == 2 && (expectedLen == 0 || expectedLen == 2)) {
+ } else if (tokens.length == 2 && (expectedLen == 0 || expectedLen == 2) && !path.contains(":")) {
relPath = tokens[0] + "/" + tokens[1];
} else {
throw new IllegalArgumentException(path);
@@ -112,7 +109,13 @@ public class GarbageCollectionAlgorithm {
SortedMap<String,String> ret = new TreeMap<String,String>();
for (String candidate : candidates) {
- String relPath = makeRelative(candidate, 0);
+ String relPath;
+ try {
+ relPath = makeRelative(candidate, 0);
+ } catch (IllegalArgumentException iae) {
+ log.warn("Ingoring invalid deletion candidate " + candidate);
+ continue;
+ }
ret.put(relPath, candidate);
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2520_a64151e6.diff |
bugs-dot-jar_data_ACCUMULO-1044_ea2f9856 | ---
BugID: ACCUMULO-1044
Summary: bulk imported files showing up in metadata after bulk import fails
Description: |
Bulk import fails. The file is moved to the failures directory.
But references in the !METADATA table remain.
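Part of the patch below is a small refactor of the constraint code around helpers that lazily build the violations list; a standalone sketch of that helper pattern (the method name mirrors the patch, the wrapper class is just for illustration):
{code}
import java.util.ArrayList;

public class ViolationListSketch {
  // Lazily create the list and avoid recording the same violation code twice.
  static ArrayList<Short> addIfNotPresent(ArrayList<Short> violations, int code) {
    if (violations == null)
      violations = new ArrayList<Short>();
    if (!violations.contains((short) code))
      violations.add((short) code);
    return violations;
  }
}
{code}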
diff --git a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index bd19d1f..463b7b0 100644
--- a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.zookeeper.ZooUtil;
import org.apache.accumulo.server.client.HdfsZooInstance;
import org.apache.accumulo.server.zookeeper.ZooCache;
import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
@@ -72,6 +73,22 @@ public class MetadataConstraints implements Constraint {
return false;
}
+ static private ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
+ if (lst == null)
+ lst = new ArrayList<Short>();
+ lst.add((short)violation);
+ return lst;
+ }
+
+ static private ArrayList<Short> addIfNotPresent(ArrayList<Short> lst, int intViolation) {
+ if (lst == null)
+ return addViolation(lst, intViolation);
+ short violation = (short)intViolation;
+ if (!lst.contains(violation))
+ return addViolation(lst, intViolation);
+ return lst;
+ }
+
public List<Short> check(Environment env, Mutation mutation) {
ArrayList<Short> violations = null;
@@ -96,44 +113,30 @@ public class MetadataConstraints implements Constraint {
break;
if (!validTableNameChars[0xff & b]) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
if (!containsSemiC) {
// see if last row char is <
if (row.length == 0 || row[row.length - 1] != '<') {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
} else {
if (row.length == 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
if (row.length > 0 && row[0] == '!') {
if (row.length < 3 || row[1] != '0' || (row[2] != '<' && row[2] != ';')) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
// ensure row is not less than Constants.METADATA_TABLE_ID
if (new Text(row).compareTo(new Text(Constants.METADATA_TABLE_ID)) < 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 5);
+ violations = addViolation(violations, 5);
}
for (ColumnUpdate columnUpdate : colUpdates) {
@@ -141,17 +144,13 @@ public class MetadataConstraints implements Constraint {
if (columnUpdate.isDeleted()) {
if (!isValidColumn(columnUpdate)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 2);
+ violations = addViolation(violations, 2);
}
continue;
}
if (columnUpdate.getValue().length == 0 && !columnFamily.equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 6);
+ violations = addViolation(violations, 6);
}
if (columnFamily.equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
@@ -159,26 +158,49 @@ public class MetadataConstraints implements Constraint {
DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
if (dfv.getSize() < 0 || dfv.getNumEntries() < 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
}
} catch (NumberFormatException nfe) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
} catch (ArrayIndexOutOfBoundsException aiooe) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
}
} else if (columnFamily.equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
+ } else if (columnFamily.equals(Constants.METADATA_BULKFILE_COLUMN_FAMILY)) {
+ if (!columnUpdate.isDeleted()) {
+ // splits, which also write the time reference, are allowed to write this reference even when
+ // the transaction is not running because the other half of the tablet is holding a reference
+ // to the file.
+ boolean isSplitMutation = false;
+ // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
+ // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
+ // See ACCUMULO-1230.
+ boolean isLocationMutation = false;
+ for (ColumnUpdate update : mutation.getUpdates()) {
+ if (new ColumnFQ(update).equals(Constants.METADATA_TIME_COLUMN)) {
+ isSplitMutation = true;
+ }
+ if (update.getColumnFamily().equals(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY)) {
+ isLocationMutation = true;
+ }
+ }
+
+ if (!isSplitMutation && !isLocationMutation) {
+ String tidString = new String(columnUpdate.getValue());
+ long tid = Long.parseLong(tidString);
+ try {
+ if (!new ZooArbitrator().transactionAlive(Constants.BULK_ARBITRATOR_TYPE, tid)) {
+ violations = addViolation(violations, 8);
+ }
+ } catch (Exception ex) {
+ violations = addViolation(violations, 8);
+ }
+ }
+ }
} else {
if (!isValidColumn(columnUpdate)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 2);
+ violations = addViolation(violations, 2);
} else if (new ColumnFQ(columnUpdate).equals(Constants.METADATA_PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
&& (violations == null || !violations.contains((short) 4))) {
KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
@@ -188,9 +210,7 @@ public class MetadataConstraints implements Constraint {
boolean prevEndRowLessThanEndRow = per == null || ke.getEndRow() == null || per.compareTo(ke.getEndRow()) < 0;
if (!prevEndRowLessThanEndRow) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 3);
+ violations = addViolation(violations, 3);
}
} else if (new ColumnFQ(columnUpdate).equals(Constants.METADATA_LOCK_COLUMN)) {
if (zooCache == null) {
@@ -211,9 +231,7 @@ public class MetadataConstraints implements Constraint {
}
if (!lockHeld) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 7);
+ violations = addViolation(violations, 7);
}
}
@@ -221,7 +239,10 @@ public class MetadataConstraints implements Constraint {
}
if (violations != null) {
- log.debug(" violating metadata mutation : " + mutation);
+ log.debug("violating metadata mutation : " + new String(mutation.getRow()));
+ for (ColumnUpdate update : mutation.getUpdates()) {
+ log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value " + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
+ }
}
return violations;
@@ -243,6 +264,8 @@ public class MetadataConstraints implements Constraint {
return "Empty values are not allowed for any " + Constants.METADATA_TABLE_NAME + " column";
case 7:
return "Lock not held in zookeeper by writer";
+ case 8:
+ return "Bulk load transaction no longer running";
}
return null;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1044_ea2f9856.diff |
bugs-dot-jar_data_ACCUMULO-1800_3143b9c5 | ---
BugID: ACCUMULO-1800
Summary: delete mutations not working through the Proxy
Description: |
Aru Sahni writes:
{quote}
I'm new to Accumulo and am still trying to wrap my head around its ways. To further that challenge, I'm using Pyaccumulo, which doesn't present much in terms of available reference material.
Right now I'm trying to understand how Accumulo manages record (key-value pair) deletions.
conn = Accumulo(host, port, user, password)
table = 'test_table'
conn.create_table(table)
writer = conn.create_batch_writer(table)
mut = Mutation('mut_01')
mut.put(cf='item', cq='name', value='car')
writer.add_mutation(mut)
writer.close()
conn.close()
Will generate a record (found via a shell scan):
mut_01 item:name [] car
However the subsequent mutation...
writer = conn.create_batch_writer(table)
mut = Mutation('mut_01')
mut.put(cf='item', cq='name', is_delete=True)
writer.add_mutation(mut)
writer.close()
Results in:
mut_01 item:name []
How should one expect the deleted row to be represented? That record sticks around even after I force a compaction of the table. I was expecting it to not show up in any iterators, or at least provide an easy way to see if the cell has been deleted.
{quote}
[~ecn] has confirmed the problem.
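For comparison, the same put-then-delete sequence through the native Java client API produces a delete marker rather than an empty value. This is an illustrative sketch only (not part of the original report); the table name and cell values mirror the Python snippet above, and the class name is made up:
{code}
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

public class DeleteMutationSketch {
  // Writes the same two mutations the Python snippet attempts: a put, then a delete.
  static void putThenDelete(Connector conn) throws Exception {
    BatchWriter writer = conn.createBatchWriter("test_table", new BatchWriterConfig());

    Mutation put = new Mutation(new Text("mut_01"));
    put.put(new Text("item"), new Text("name"), new Value("car".getBytes()));
    writer.addMutation(put);

    Mutation del = new Mutation(new Text("mut_01"));
    del.putDelete(new Text("item"), new Text("name")); // a delete marker, not an empty value
    writer.addMutation(del);

    writer.close();
  }
}
{code}
The proxy path should end up in the same place; the patch below adjusts the ProxyServer branch so a delete without an explicit timestamp becomes a putDelete instead of a put of an empty value.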
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 3c5c88a..c6e74f1 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -1126,14 +1126,14 @@ public class ProxyServer implements AccumuloProxy.Iface {
if (update.isSetDeleteCell()) {
m.putDelete(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp());
} else {
- if (update.isSetDeleteCell()) {
- m.putDelete(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp());
- } else {
- m.put(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp(), value);
+ m.put(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz, update.getTimestamp(), new Value(value));
}
- }
} else {
- m.put(update.getColFamily(), update.getColQualifier(), viz, value);
+ if (update.isSetDeleteCell()) {
+ m.putDelete(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz);
+ } else {
+ m.put(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz, new Value(value));
+ }
}
}
try {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1800_3143b9c5.diff |
bugs-dot-jar_data_ACCUMULO-2962_023be574 | ---
BugID: ACCUMULO-2962
Summary: RangeInputSplit Writable methods don't serialize IteratorSettings
Description: |-
Was trying to figure out why some information was getting lost on a RangeInputSplit after serialization, and found out it was because the serialization and deserialization of the class didn't include the configured IteratorSettings.
This likely isn't a big problem for normal users: when no IteratorSettings are configured on the RangeInputSplit, it falls back to pulling them from the Configuration. With "non-standard" uses of mapreduce, though, that information could be missing from the Configuration the mappers receive, and they would subsequently error.
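A rough sketch of the round trip that exposed this, assuming the 1.6-era accessors on RangeInputSplit (no-arg constructor, setIterators/getIterators); the iterator priority, name, and class below are arbitrary:
{code}
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.util.Collections;

import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.mapreduce.RangeInputSplit;

public class SplitRoundTrip {
  public static void main(String[] args) throws Exception {
    RangeInputSplit split = new RangeInputSplit();
    split.setIterators(Collections.singletonList(new IteratorSetting(50, "vers",
        "org.apache.accumulo.core.iterators.user.VersioningIterator")));

    // Serialize and deserialize with the Writable methods, the way the mapreduce
    // framework ships splits from the job client to the mappers.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    split.write(new DataOutputStream(buf));

    RangeInputSplit copy = new RangeInputSplit();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));

    // Before the patch the configured iterators were dropped here and the mapper
    // silently fell back to whatever the Configuration contained.
    System.out.println(copy.getIterators());
  }
}
{code}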
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
index 73c9b59..05316a1 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/RangeInputSplit.java
@@ -205,6 +205,14 @@ public class RangeInputSplit extends InputSplit implements Writable {
}
if (in.readBoolean()) {
+ int numIterators = in.readInt();
+ iterators = new ArrayList<IteratorSetting>(numIterators);
+ for (int i = 0; i < numIterators; i++) {
+ iterators.add(new IteratorSetting(in));
+ }
+ }
+
+ if (in.readBoolean()) {
level = Level.toLevel(in.readInt());
}
}
@@ -275,6 +283,14 @@ public class RangeInputSplit extends InputSplit implements Writable {
out.writeUTF(zooKeepers);
}
+ out.writeBoolean(null != iterators);
+ if (null != iterators) {
+ out.writeInt(iterators.size());
+ for (IteratorSetting iterator : iterators) {
+ iterator.write(out);
+ }
+ }
+
out.writeBoolean(null != level);
if (null != level) {
out.writeInt(level.toInt());
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2962_023be574.diff |
bugs-dot-jar_data_ACCUMULO-3385_a3267d3e | ---
BugID: ACCUMULO-3385
Summary: DateLexicoder fails to correctly order dates prior to 1970
Description: |-
DateLexicoder incorrectly sorts dates before 1970 after all other dates.
Therefore, the order was correct for all dates if the user only wrote dates before 1970, or only dates after 1970, but not if they did both.
The DateLexicoder should be fixed to store using a signed LongLexicoder internally, instead of the ULongLexicoder that it used before.
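The root cause is the sign bit: an unsigned encoding of epoch milliseconds makes negative values (dates before 1970) start with high bytes and sort last. A standalone sketch of the effect, deliberately using raw two's-complement bytes rather than the lexicoder itself:
{code}
import java.nio.ByteBuffer;
import java.util.Date;

public class SignBitDemo {
  // Big-endian two's-complement bytes of the epoch millis, compared as unsigned
  // bytes the way sorted key bytes are compared.
  static byte[] raw(Date d) {
    return ByteBuffer.allocate(8).putLong(d.getTime()).array();
  }

  static int compareUnsigned(byte[] a, byte[] b) {
    for (int i = 0; i < a.length; i++) {
      int diff = (a[i] & 0xff) - (b[i] & 0xff);
      if (diff != 0)
        return diff;
    }
    return 0;
  }

  public static void main(String[] args) {
    Date before1970 = new Date(-86400000L); // 1969-12-31T00:00:00Z
    Date after1970 = new Date(86400000L);   // 1970-01-02T00:00:00Z
    // Prints a positive number: the 1969 date sorts AFTER the 1970 date unless the
    // sign bit is handled, which is what the signed LongLexicoder does.
    System.out.println(compareUnsigned(raw(before1970), raw(after1970)));
  }
}
{code}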
diff --git a/core/src/main/java/org/apache/accumulo/core/client/lexicoder/DateLexicoder.java b/core/src/main/java/org/apache/accumulo/core/client/lexicoder/DateLexicoder.java
index c93ba70..8533bfe 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/lexicoder/DateLexicoder.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/lexicoder/DateLexicoder.java
@@ -20,20 +20,21 @@ import java.util.Date;
/**
* A lexicoder for date objects. It preserves the native Java sort order for Date.
+ *
* @since 1.6.0
*/
public class DateLexicoder implements Lexicoder<Date> {
-
- private ULongLexicoder longEncoder = new ULongLexicoder();
-
+
+ private LongLexicoder longEncoder = new LongLexicoder();
+
@Override
public byte[] encode(Date data) {
return longEncoder.encode(data.getTime());
}
-
+
@Override
public Date decode(byte[] data) {
return new Date(longEncoder.decode(data));
}
-
+
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3385_a3267d3e.diff |
bugs-dot-jar_data_ACCUMULO-3897_699b8bf0 | ---
BugID: ACCUMULO-3897
Summary: ShutdownTServer never sets requestedShutdown
Description: |-
ACCUMULO-1259 made ShutdownTServer a bit more sane with respect to what it was doing and the FATE repo interface.
One change it made was to avoid repeatedly invoking shutdownTServer on the master.
Except {{requestedShutdown}} is never set to {{true}}.
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
index 11cd91b..171e312 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tserverOps/ShutdownTServer.java
@@ -58,6 +58,7 @@ public class ShutdownTServer extends MasterRepo {
// only send this request once
if (!requestedShutdown) {
master.shutdownTServer(server);
+ requestedShutdown = true;
}
if (master.onlineTabletServers().contains(server)) {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3897_699b8bf0.diff |
bugs-dot-jar_data_ACCUMULO-1183_742960f1 | ---
BugID: ACCUMULO-1183
Summary: ProxyServer does not set column information on BatchScanner
Description: The createScanner method uses the options from the thrift request to
call fetchColumn() and fetchColumnFamily(). The createBatchScanner method should
have the same feature, though the statements are absent from the code.
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 911d187..167cecc 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -819,7 +819,17 @@ public class ProxyServer implements AccumuloProxy.Iface {
}
}
scanner.setRanges(ranges);
+
+ if (opts.columns != null) {
+ for (ScanColumn col : opts.columns) {
+ if (col.isSetColQualifier())
+ scanner.fetchColumn(ByteBufferUtil.toText(col.colFamily), ByteBufferUtil.toText(col.colQualifier));
+ else
+ scanner.fetchColumnFamily(ByteBufferUtil.toText(col.colFamily));
+ }
+ }
}
+
UUID uuid = UUID.randomUUID();
ScannerPlusIterator spi = new ScannerPlusIterator();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1183_742960f1.diff |
bugs-dot-jar_data_ACCUMULO-4138_4d23d784 | ---
BugID: ACCUMULO-4138
Summary: CompactCommand description is incorrect
Description: "The compact command has the following description \n{code}\nroot@accumulo>
compact -?\nusage: compact [<table>{ <table>}] [-?] [-b <begin-row>] [--cancel]
[-e <end-row>] [-nf] [-ns <namespace> | -p <pattern> | -t <tableName>] [-pn <profile>]
\ [-w]\ndescription: sets all tablets for a table to major compact as soon as possible
(based on current time)\n -?,--help display this help\n -b,--begin-row
<begin-row> begin row (inclusive)\n --cancel cancel
user initiated compactions\n -e,--end-row <end-row> end row (inclusive)\n
\ -nf,--noFlush do not flush table data in memory before compacting.\n
\ -ns,--namespace <namespace> name of a namespace to operate on\n -p,--pattern
<pattern> regex pattern of table names to operate on\n -pn,--profile <profile>
\ iterator profile name\n -t,--table <tableName> name of a table
to operate on\n -w,--wait wait for compact to finish\n{code}\n\nHowever,
the --begin-row is not inclusive. Here is a simple demonstration.\n{code}\ncreatetable
compacttest\naddsplits a b c\ninsert \"a\" \"1\" \"\" \"\"\ninsert \"a\" \"2\" \"\"
\"\"\ninsert \"b\" \"3\" \"\" \"\"\ninsert \"b\" \"4\" \"\" \"\"\ninsert \"c\" \"5\"
\"\" \"\"\ninsert \"c\" \"6\" \"\" \"\"\nflush -w\nscan -t accumulo.metadata -np\ncompact
-b a -e c -t compacttest -w\nscan -t accumulo.metadata -np\ndeletetable compacttest
-f\n{code}\n\nYou will see that the file associated with the 'a' split is still an F
flush file, while the files in the 'b' and 'c' splits are A files.\n\nNot sure if
the fix is to update the command's description, which would be easy, or to make the
begin row actually inclusive."
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
index 432f17a..99e09e3 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OptUtil.java
@@ -117,7 +117,7 @@ public abstract class OptUtil {
}
public static Option startRowOpt() {
- final Option o = new Option(START_ROW_OPT, "begin-row", true, "begin row (NOT) inclusive");
+ final Option o = new Option(START_ROW_OPT, "begin-row", true, "begin row (exclusive)");
o.setArgName("begin-row");
return o;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-4138_4d23d784.diff |
bugs-dot-jar_data_ACCUMULO-1348_6ff92b12 | ---
BugID: ACCUMULO-1348
Summary: Accumulo Shell does not respect 'exit' when executing file
Description: |-
If there is an {{exit}} statement in the file given via {{accumulo shell -f file}}, the execution seems to skip it and go on to the next command instead of terminating.
To recreate:
{noformat}
[mike@home ~] cat bug.accumulo
exit
scan -np -t !METADATA
[mike@home ~] bin/accumulo shell -f /home/mike/bug.accumulo
{noformat}
Expected output: None
Actual output: A full scan of the !METADATA
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
index 1a3c518..4469d5c 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
@@ -78,8 +78,13 @@ public class MockShell extends Shell {
if (execFile != null) {
java.util.Scanner scanner = new java.util.Scanner(new File(execFile));
- while (scanner.hasNextLine())
- execCommand(scanner.nextLine(), true, isVerbose());
+ try {
+ while (scanner.hasNextLine() && !hasExited()) {
+ execCommand(scanner.nextLine(), true, isVerbose());
+ }
+ } finally {
+ scanner.close();
+ }
} else if (execCommand != null) {
for (String command : execCommand.split("\n")) {
execCommand(command, true, isVerbose());
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
index ab08c32..75f7bd0 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
@@ -250,7 +250,7 @@ public class Shell extends ShellOptions {
if (sysUser == null)
sysUser = "root";
String user = cl.getOptionValue(usernameOption.getOpt(), sysUser);
-
+
String passw = cl.getOptionValue(passwOption.getOpt(), null);
tabCompletion = !cl.hasOption(tabCompleteOption.getLongOpt());
String[] loginOptions = cl.getOptionValues(loginOption.getOpt());
@@ -265,13 +265,13 @@ public class Shell extends ShellOptions {
if (loginOptions == null && cl.hasOption(tokenOption.getOpt()))
throw new IllegalArgumentException("Must supply '-" + loginOption.getOpt() + "' option with '-" + tokenOption.getOpt() + "' option");
-
+
if (passw != null && cl.hasOption(tokenOption.getOpt()))
throw new IllegalArgumentException("Can not supply '-" + passwOption.getOpt() + "' option with '-" + tokenOption.getOpt() + "' option");
-
+
if (user == null)
throw new MissingArgumentException(usernameOption);
-
+
if (loginOptions != null && cl.hasOption(tokenOption.getOpt())) {
Properties props = new Properties();
for (String loginOption : loginOptions)
@@ -283,7 +283,7 @@ public class Shell extends ShellOptions {
this.token = Class.forName(cl.getOptionValue(tokenOption.getOpt())).asSubclass(AuthenticationToken.class).newInstance();
this.token.init(props);
}
-
+
if (!cl.hasOption(fakeOption.getLongOpt())) {
DistributedTrace.enable(instance, new ZooReader(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut()), "shell", InetAddress.getLocalHost()
.getHostName());
@@ -447,8 +447,9 @@ public class Shell extends ShellOptions {
if (execFile != null) {
java.util.Scanner scanner = new java.util.Scanner(new File(execFile));
try {
- while (scanner.hasNextLine())
+ while (scanner.hasNextLine() && !hasExited()) {
execCommand(scanner.nextLine(), true, isVerbose());
+ }
} finally {
scanner.close();
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1348_6ff92b12.diff |
bugs-dot-jar_data_ACCUMULO-1518_df4b1985 | ---
BugID: ACCUMULO-1518
Summary: FileOperations expects RFile filenames to contain only 1 dot.
Description: |-
If I attempt to create or read an RFile that contains more than 1 dot in the filename, FileOperations throws an IllegalArgumentException("File name " + name + " has no extension").
Please allow creation/import of RFiles that have more than 1 dot in the filename.
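A tiny illustration of the old extension check versus the fix (the filename is hypothetical):
{code}
public class ExtensionCheck {
  public static void main(String[] args) {
    String name = "I0000a.copy.rf"; // hypothetical RFile name containing two dots

    String[] sp = name.split("\\.");
    // Old behaviour: sp.length != 2, so "File name ... has no extension" was thrown
    // even though a perfectly good extension is present.
    System.out.println(sp.length);         // 3
    System.out.println(sp[sp.length - 1]); // "rf", the extension the patch now uses
  }
}
{code}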
diff --git a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
index 9f60725..17e540b 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
@@ -44,14 +44,13 @@ class DispatchingFileFactory extends FileOperations {
if (name.startsWith(Constants.MAPFILE_EXTENSION + "_")) {
return new MapFileOperations();
}
-
String[] sp = name.split("\\.");
- if (sp.length != 2) {
+ if (sp.length < 2) {
throw new IllegalArgumentException("File name " + name + " has no extension");
}
- String extension = sp[1];
+ String extension = sp[sp.length - 1];
if (extension.equals(Constants.MAPFILE_EXTENSION) || extension.equals(Constants.MAPFILE_EXTENSION + "_tmp")) {
return new MapFileOperations();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1518_df4b1985.diff |
bugs-dot-jar_data_ACCUMULO-2494_0dc92ca1 | ---
BugID: ACCUMULO-2494
Summary: Stat calculation of STDEV may be inaccurate
Description: |-
The math is sound, but it is susceptible to rounding errors. We should address that.
See http://www.strchr.com/standard_deviation_in_one_pass and http://www.cs.berkeley.edu/~mhoemmen/cs194/Tutorials/variance.pdf
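A short sketch of the failure mode with the textbook one-pass formula sqrt(E[x^2] - mean^2): for large values with a small spread the two terms cancel catastrophically in double arithmetic. The sample values are made up:
{code}
public class NaiveStdDev {
  public static void main(String[] args) {
    long[] samples = {1_000_000_000L, 1_000_000_001L, 1_000_000_002L};

    // One-pass accumulation in the style the old Stat used.
    double sum = 0, sumOfSquares = 0;
    for (long s : samples) {
      sum += s;
      sumOfSquares += (double) s * s;
    }
    double mean = sum / samples.length;
    double onePass = Math.sqrt(sumOfSquares / samples.length - mean * mean);

    // Two-pass reference: the population std dev of {x, x+1, x+2} is sqrt(2/3), about 0.816.
    double acc = 0;
    for (long s : samples) {
      acc += (s - mean) * (s - mean);
    }
    double twoPass = Math.sqrt(acc / samples.length);

    System.out.println("one-pass: " + onePass); // far from 0.816; may even be 0 or NaN
    System.out.println("two-pass: " + twoPass); // ~0.816
  }
}
{code}
Delegating to commons-math's StorelessUnivariateStatistic implementations, as the patch does, sidesteps the cancellation.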
diff --git a/core/src/main/java/org/apache/accumulo/core/util/Stat.java b/core/src/main/java/org/apache/accumulo/core/util/Stat.java
index e65265c..d2d560e 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/Stat.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/Stat.java
@@ -16,54 +16,66 @@
*/
package org.apache.accumulo.core.util;
+import org.apache.commons.math.stat.descriptive.StorelessUnivariateStatistic;
+import org.apache.commons.math.stat.descriptive.moment.Mean;
+import org.apache.commons.math.stat.descriptive.moment.StandardDeviation;
+import org.apache.commons.math.stat.descriptive.rank.Max;
+import org.apache.commons.math.stat.descriptive.rank.Min;
+import org.apache.commons.math.stat.descriptive.summary.Sum;
+
public class Stat {
-
- long max = Long.MIN_VALUE;
- long min = Long.MAX_VALUE;
- long sum = 0;
- int count = 0;
- double partialStdDev = 0;
-
+ Min min;
+ Max max;
+ Sum sum;
+ Mean mean;
+ StandardDeviation sd;
+
+ StorelessUnivariateStatistic[] stats;
+
+ public Stat() {
+ min = new Min();
+ max = new Max();
+ sum = new Sum();
+ mean = new Mean();
+ sd = new StandardDeviation();
+
+ stats = new StorelessUnivariateStatistic[] {min, max, sum, mean, sd};
+ }
+
public void addStat(long stat) {
- if (stat > max)
- max = stat;
- if (stat < min)
- min = stat;
-
- sum += stat;
-
- partialStdDev += stat * stat;
-
- count++;
+ for (StorelessUnivariateStatistic statistic : stats) {
+ statistic.increment(stat);
+ }
}
-
+
public long getMin() {
- return min;
+ return (long) min.getResult();
}
-
+
public long getMax() {
- return max;
+ return (long) max.getResult();
+ }
+
+ public long getSum() {
+ return (long) sum.getResult();
}
-
+
public double getAverage() {
- return ((double) sum) / count;
+ return mean.getResult();
}
-
+
public double getStdDev() {
- return Math.sqrt(partialStdDev / count - getAverage() * getAverage());
+ return sd.getResult();
}
-
+
public String toString() {
- return String.format("%,d %,d %,.2f %,d", getMin(), getMax(), getAverage(), count);
+ return String.format("%,d %,d %,.2f %,d", getMin(), getMax(), getAverage(), mean.getN());
}
-
+
public void clear() {
- sum = 0;
- count = 0;
- partialStdDev = 0;
- }
-
- public long getSum() {
- return sum;
+ for (StorelessUnivariateStatistic statistic : stats) {
+ statistic.clear();
+ }
}
+
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2494_0dc92ca1.diff |
bugs-dot-jar_data_ACCUMULO-3475_7651b777 | ---
BugID: ACCUMULO-3475
Summary: Shell.config()'s return value is ignored.
Description: "{{Shell.config()}} returns a boolean which is true if there was an error
configuring the shell, but the value is never observed. This can result in other
unintended errors (like trying to use the ConsoleReader member when it's not initialized)."
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
index 5ff340b..0fbe879 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockShell.java
@@ -44,15 +44,19 @@ public class MockShell extends Shell {
this.writer = writer;
}
+ @Override
public boolean config(String... args) {
- configError = super.config(args);
+ // If configuring the shell failed, fail quickly
+ if (!super.config(args)) {
+ return false;
+ }
// Update the ConsoleReader with the input and output "redirected"
try {
this.reader = new ConsoleReader(in, writer);
} catch (Exception e) {
printException(e);
- configError = true;
+ return false;
}
// Don't need this for testing purposes
@@ -61,7 +65,7 @@ public class MockShell extends Shell {
// Make the parsing from the client easier;
this.verbose = false;
- return configError;
+ return true;
}
@Override
@@ -71,9 +75,6 @@ public class MockShell extends Shell {
}
public int start() throws IOException {
- if (configError)
- return 1;
-
String input;
if (isVerbose())
printInfo();
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
index cc2053f..808d340 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
@@ -185,7 +185,6 @@ public class Shell extends ShellOptions {
private Token rootToken;
public final Map<String,Command> commandFactory = new TreeMap<String,Command>();
public final Map<String,Command[]> commandGrouping = new TreeMap<String,Command[]>();
- protected boolean configError = false;
// exit if true
private boolean exit = false;
@@ -215,7 +214,11 @@ public class Shell extends ShellOptions {
this.writer = writer;
}
- // Not for client use
+ /**
+ * Configures the shell using the provided options. Not for client use.
+ *
+ * @return true if the shell was successfully configured, false otherwise.
+ */
public boolean config(String... args) {
CommandLine cl;
@@ -225,9 +228,9 @@ public class Shell extends ShellOptions {
throw new ParseException("Unrecognized arguments: " + cl.getArgList());
if (cl.hasOption(helpOpt.getOpt())) {
- configError = true;
printHelp("shell", SHELL_DESCRIPTION, opts);
- return true;
+ exitCode = 0;
+ return false;
}
setDebugging(cl.hasOption(debugOption.getLongOpt()));
@@ -238,10 +241,10 @@ public class Shell extends ShellOptions {
throw new MissingArgumentException(zooKeeperInstance);
} catch (Exception e) {
- configError = true;
printException(e);
printHelp("shell", SHELL_DESCRIPTION, opts);
- return true;
+ exitCode = 1;
+ return false;
}
// get the options that were parsed
@@ -316,7 +319,8 @@ public class Shell extends ShellOptions {
} catch (Exception e) {
printException(e);
- configError = true;
+ exitCode = 1;
+ return false;
}
// decide whether to execute commands from a file and quit
@@ -373,7 +377,7 @@ public class Shell extends ShellOptions {
for (Command cmd : otherCommands) {
commandFactory.put(cmd.getName(), cmd);
}
- return configError;
+ return true;
}
protected void setInstance(CommandLine cl) {
@@ -408,15 +412,14 @@ public class Shell extends ShellOptions {
public static void main(String args[]) throws IOException {
Shell shell = new Shell();
- shell.config(args);
+ if (!shell.config(args)) {
+ System.exit(shell.getExitCode());
+ }
System.exit(shell.start());
}
public int start() throws IOException {
- if (configError)
- return 1;
-
String input;
if (isVerbose())
printInfo();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3475_7651b777.diff |
bugs-dot-jar_data_ACCUMULO-1544_0cf2ff72 | ---
BugID: ACCUMULO-1544
Summary: Remove username from initialization
Description: This is an artifact from a brief transition period during 1.5 development.
We have a flag for the user to set what the root username is, except it's never
used. We should remove both the variable and the flag for it.
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index 7c27dd8..43fa6cb 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -337,7 +337,7 @@ public class MiniAccumuloCluster {
if (!initialized) {
// sleep a little bit to let zookeeper come up before calling init, seems to work better
UtilWaitThread.sleep(250);
- Process initProcess = exec(Initialize.class, "--instance-name", config.getInstanceName(), "--password", config.getRootPassword(), "--username", "root");
+ Process initProcess = exec(Initialize.class, "--instance-name", config.getInstanceName(), "--password", config.getRootPassword());
int ret = initProcess.waitFor();
if (ret != 0) {
throw new RuntimeException("Initialize process returned " + ret);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1544_0cf2ff72.diff |
bugs-dot-jar_data_ACCUMULO-1514_fb25913c | ---
BugID: ACCUMULO-1514
Summary: AccumuloVFSClassloader incorrectly treats folders as folders of jar files
Description: |
Specifying a directory of classes is incorrectly interpreted as a directory of jars in the general.dynamic.classpaths configuration property.
Example: adding a path such as *_$ACCUMULO_HOME/core/target/classes_* gets incorrectly interpreted as *_$ACCUMULO_HOME/core/target/classes/\*_* and evaluates to *_$ACCUMULO_HOME/core/target/classes/org_* and *_$ACCUMULO_HOME/core/target/classes/META-INF_*, but *NOT* to *_$ACCUMULO_HOME/core/target/classes_* as expected.
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
index b1e829a..eb653bc 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
@@ -58,9 +58,9 @@ import org.apache.log4j.Logger;
*
*/
public class AccumuloVFSClassLoader {
-
+
public static class AccumuloVFSClassLoaderShutdownThread implements Runnable {
-
+
public void run() {
try {
AccumuloVFSClassLoader.close();
@@ -68,35 +68,35 @@ public class AccumuloVFSClassLoader {
// do nothing, we are shutting down anyway
}
}
-
+
}
-
+
private static List<WeakReference<DefaultFileSystemManager>> vfsInstances = Collections
.synchronizedList(new ArrayList<WeakReference<DefaultFileSystemManager>>());
-
+
public static final String DYNAMIC_CLASSPATH_PROPERTY_NAME = "general.dynamic.classpaths";
-
+
public static final String DEFAULT_DYNAMIC_CLASSPATH_VALUE = "$ACCUMULO_HOME/lib/ext/[^.].*.jar\n";
-
+
public static final String VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY = "general.vfs.classpaths";
-
+
public static final String VFS_CONTEXT_CLASSPATH_PROPERTY = "general.vfs.context.classpath.";
-
+
public static final String VFS_CACHE_DIR = "general.vfs.cache.dir";
-
+
private static ClassLoader parent = null;
private static volatile ReloadingClassLoader loader = null;
private static final Object lock = new Object();
-
+
private static ContextManager contextManager;
-
+
private static Logger log = Logger.getLogger(AccumuloVFSClassLoader.class);
-
+
static {
// Register the shutdown hook
Runtime.getRuntime().addShutdownHook(new Thread(new AccumuloVFSClassLoaderShutdownThread()));
}
-
+
public synchronized static <U> Class<? extends U> loadClass(String classname, Class<U> extension) throws ClassNotFoundException {
try {
return (Class<? extends U>) getClassLoader().loadClass(classname).asSubclass(extension);
@@ -104,40 +104,45 @@ public class AccumuloVFSClassLoader {
throw new ClassNotFoundException("IO Error loading class " + classname, e);
}
}
-
+
public static Class<?> loadClass(String classname) throws ClassNotFoundException {
return loadClass(classname, Object.class).asSubclass(Object.class);
}
-
+
static FileObject[] resolve(FileSystemManager vfs, String uris) throws FileSystemException {
return resolve(vfs, uris, new ArrayList<FileObject>());
}
-
+
static FileObject[] resolve(FileSystemManager vfs, String uris, ArrayList<FileObject> pathsToMonitor) throws FileSystemException {
if (uris == null)
return new FileObject[0];
-
+
ArrayList<FileObject> classpath = new ArrayList<FileObject>();
-
+
pathsToMonitor.clear();
-
+
for (String path : uris.split(",")) {
-
+
path = path.trim();
-
+
if (path.equals(""))
continue;
-
+
path = AccumuloClassLoader.replaceEnvVars(path, System.getenv());
-
+
FileObject fo = vfs.resolveFile(path);
-
+
switch (fo.getType()) {
case FILE:
- case FOLDER:
classpath.add(fo);
pathsToMonitor.add(fo);
break;
+ case FOLDER:
+ pathsToMonitor.add(fo);
+ for (FileObject child : fo.getChildren()) {
+ classpath.add(child);
+ }
+ break;
case IMAGINARY:
// assume its a pattern
String pattern = fo.getName().getBaseName();
@@ -157,67 +162,67 @@ public class AccumuloVFSClassLoader {
log.warn("ignoring classpath entry " + fo);
break;
}
-
+
}
-
+
return classpath.toArray(new FileObject[classpath.size()]);
}
-
+
private static ReloadingClassLoader createDynamicClassloader(final ClassLoader parent) throws FileSystemException, IOException {
String dynamicCPath = AccumuloClassLoader.getAccumuloString(DYNAMIC_CLASSPATH_PROPERTY_NAME, DEFAULT_DYNAMIC_CLASSPATH_VALUE);
-
+
String envJars = System.getenv("ACCUMULO_XTRAJARS");
if (null != envJars && !envJars.equals(""))
if (dynamicCPath != null && !dynamicCPath.equals(""))
dynamicCPath = dynamicCPath + "," + envJars;
else
dynamicCPath = envJars;
-
+
ReloadingClassLoader wrapper = new ReloadingClassLoader() {
@Override
public ClassLoader getClassLoader() {
return parent;
}
};
-
+
if (dynamicCPath == null || dynamicCPath.equals(""))
return wrapper;
-
+
// TODO monitor time for lib/ext was 1 sec... should this be configurable? - ACCUMULO-1301
return new AccumuloReloadingVFSClassLoader(dynamicCPath, generateVfs(), wrapper, 1000, true);
}
-
+
public static ClassLoader getClassLoader() throws IOException {
ReloadingClassLoader localLoader = loader;
while (null == localLoader) {
synchronized (lock) {
if (null == loader) {
-
+
FileSystemManager vfs = generateVfs();
-
+
// Set up the 2nd tier class loader
if (null == parent) {
parent = AccumuloClassLoader.getClassLoader();
}
-
+
FileObject[] vfsCP = resolve(vfs, AccumuloClassLoader.getAccumuloString(VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY, ""));
-
+
if (vfsCP.length == 0) {
localLoader = createDynamicClassloader(parent);
loader = localLoader;
return localLoader.getClassLoader();
}
-
+
// Create the Accumulo Context ClassLoader using the DEFAULT_CONTEXT
localLoader = createDynamicClassloader(new VFSClassLoader(vfsCP, vfs, parent));
loader = localLoader;
}
}
}
-
+
return localLoader.getClassLoader();
}
-
+
public static FileSystemManager generateVfs() throws FileSystemException {
DefaultFileSystemManager vfs = new FinalCloseDefaultFileSystemManager();
vfs.addProvider("res", new org.apache.commons.vfs2.provider.res.ResourceFileProvider());
@@ -263,11 +268,11 @@ public class AccumuloVFSClassLoader {
vfsInstances.add(new WeakReference<DefaultFileSystemManager>(vfs));
return vfs;
}
-
+
public interface Printer {
void print(String s);
}
-
+
public static void printClassPath() {
printClassPath(new Printer() {
@Override
@@ -276,28 +281,28 @@ public class AccumuloVFSClassLoader {
}
});
}
-
+
public static void printClassPath(Printer out) {
try {
ClassLoader cl = getClassLoader();
ArrayList<ClassLoader> classloaders = new ArrayList<ClassLoader>();
-
+
while (cl != null) {
classloaders.add(cl);
cl = cl.getParent();
}
-
+
Collections.reverse(classloaders);
-
+
int level = 0;
-
+
for (ClassLoader classLoader : classloaders) {
if (level > 0)
out.print("");
level++;
-
+
String classLoaderDescription;
-
+
switch (level) {
case 1:
classLoaderDescription = level + ": Java System Classloader (loads Java system resources)";
@@ -316,16 +321,16 @@ public class AccumuloVFSClassLoader {
+ AccumuloVFSClassLoader.class.getName() + ")";
break;
}
-
+
if (classLoader instanceof URLClassLoader) {
// If VFS class loader enabled, but no contexts defined.
URLClassLoader ucl = (URLClassLoader) classLoader;
out.print("Level " + classLoaderDescription + " URL classpath items are:");
-
+
for (URL u : ucl.getURLs()) {
out.print("\t" + u.toExternalForm());
}
-
+
} else if (classLoader instanceof VFSClassLoader) {
out.print("Level " + classLoaderDescription + " VFS classpaths items are:");
VFSClassLoader vcl = (VFSClassLoader) classLoader;
@@ -336,12 +341,12 @@ public class AccumuloVFSClassLoader {
out.print("Unknown classloader configuration " + classLoader.getClass());
}
}
-
+
} catch (Throwable t) {
throw new RuntimeException(t);
}
}
-
+
public static synchronized ContextManager getContextManager() throws IOException {
if (contextManager == null) {
getClassLoader();
@@ -356,10 +361,10 @@ public class AccumuloVFSClassLoader {
}
});
}
-
+
return contextManager;
}
-
+
public static void close() {
for (WeakReference<DefaultFileSystemManager> vfsInstance : vfsInstances) {
DefaultFileSystemManager ref = vfsInstance.get();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1514_fb25913c.diff |
bugs-dot-jar_data_ACCUMULO-3408_81d25bc2 | ---
BugID: ACCUMULO-3408
Summary: display the exact number of tablet servers
Description: "This is a regression of ACCUMULO-1140\n\n"
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/PreciseNumberType.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/PreciseNumberType.java
index 66f97e1..1642fc2 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/PreciseNumberType.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/PreciseNumberType.java
@@ -24,9 +24,16 @@ public class PreciseNumberType extends NumberType<Integer> {
super(warnMin, warnMax, errMin, errMax);
}
- public PreciseNumberType() {}
-
- public static String bigNumber(long big, String[] SUFFIXES, long base) {
- return String.format("%,d", big);
+ @Override
+ public String format(Object obj) {
+ int i = (Integer)obj;
+ String display = String.format("%,d", obj);
+ if (i < errMin || i > errMax)
+ return String.format("<span class='error'>%s</span>", display);
+ if (i < warnMin || i > warnMax)
+ return String.format("<span class='warning'>%s</span>", display);
+ return display;
}
+
+ public PreciseNumberType() {}
}
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/util/celltypes/NumberType.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/util/celltypes/NumberType.java
index d311603..b285727 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/util/celltypes/NumberType.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/util/celltypes/NumberType.java
@@ -20,7 +20,7 @@ import static org.apache.accumulo.core.util.NumUtil.bigNumberForQuantity;
public class NumberType<T extends Number> extends CellType<T> {
- private T warnMin, warnMax, errMin, errMax;
+ protected final T warnMin, warnMax, errMin, errMax;
public NumberType(T warnMin, T warnMax, T errMin, T errMax) {
this.warnMin = warnMin;
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3408_81d25bc2.diff |
bugs-dot-jar_data_ACCUMULO-193_c831e44d | ---
BugID: ACCUMULO-193
Summary: key.followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS) can produce a key
with an invalid COLVIS
Description: Need a new algorithm for calculating the next biggest column visibility,
because appending \0 to the end creates an invalid column visibility. We might be
able to minimize the timestamp for this (i.e. set timestamp to Long.MIN_VALUE, but
keep column and row elements the same).
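A sketch of how the invalid visibility surfaces, using only the public Key/PartialKey API (class name made up):
{code}
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.hadoop.io.Text;

public class FollowingKeySketch {
  public static void main(String[] args) {
    Key k = new Key(new Text("row"), new Text("cf"), new Text("cq"), new Text("vis"));
    Key next = k.followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS);

    // The following key's visibility is "vis" plus a trailing \0 byte, which is not a
    // parseable expression, so re-parsing it (as the old toString() did via
    // ColumnVisibility) blows up.
    System.out.println(next.getColumnVisibility().getLength()); // 4, i.e. "vis" + '\0'
  }
}
{code}
The patch below addresses the display side of this by printing the raw visibility bytes in toString() instead of re-parsing them.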
diff --git a/src/core/src/main/java/org/apache/accumulo/core/data/Key.java b/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
index 3d1f92d..afab887 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
@@ -22,6 +22,8 @@ package org.apache.accumulo.core.data;
*
*/
+import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
+
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
@@ -38,8 +40,6 @@ import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
-import static org.apache.accumulo.core.util.ByteBufferUtil.toBytes;
-
public class Key implements WritableComparable<Key>, Cloneable {
protected byte[] row;
@@ -444,8 +444,10 @@ public class Key implements WritableComparable<Key>, Cloneable {
}
public static String toPrintableString(byte ba[], int offset, int len, int maxLen) {
- StringBuilder sb = new StringBuilder();
-
+ return appendPrintableString(ba, offset, len, maxLen, new StringBuilder()).toString();
+ }
+
+ public static StringBuilder appendPrintableString(byte ba[], int offset, int len, int maxLen, StringBuilder sb) {
int plen = Math.min(len, maxLen);
for (int i = 0; i < plen; i++) {
@@ -460,26 +462,33 @@ public class Key implements WritableComparable<Key>, Cloneable {
sb.append("... TRUNCATED");
}
- return sb.toString();
+ return sb;
+ }
+
+ private StringBuilder rowColumnStringBuilder() {
+ StringBuilder sb = new StringBuilder();
+ appendPrintableString(row, 0, row.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append(" ");
+ appendPrintableString(colFamily, 0, colFamily.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append(":");
+ appendPrintableString(colQualifier, 0, colQualifier.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append(" [");
+ appendPrintableString(colVisibility, 0, colVisibility.length, Constants.MAX_DATA_TO_PRINT, sb);
+ sb.append("]");
+ return sb;
}
public String toString() {
- String labelString = new ColumnVisibility(colVisibility).toString();
-
- String s = toPrintableString(row, 0, row.length, Constants.MAX_DATA_TO_PRINT) + " "
- + toPrintableString(colFamily, 0, colFamily.length, Constants.MAX_DATA_TO_PRINT) + ":"
- + toPrintableString(colQualifier, 0, colQualifier.length, Constants.MAX_DATA_TO_PRINT) + " " + labelString + " " + Long.toString(timestamp) + " "
- + deleted;
- return s;
+ StringBuilder sb = rowColumnStringBuilder();
+ sb.append(" ");
+ sb.append(Long.toString(timestamp));
+ sb.append(" ");
+ sb.append(deleted);
+ return sb.toString();
}
public String toStringNoTime() {
-
- String labelString = new ColumnVisibility(colVisibility).toString();
-
- String s = new String(row, 0, row.length) + " " + new String(colFamily, 0, colFamily.length) + ":" + new String(colQualifier, 0, colQualifier.length) + " "
- + labelString;
- return s;
+ return rowColumnStringBuilder().toString();
}
public int getLength() {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-193_c831e44d.diff |
bugs-dot-jar_data_ACCUMULO-3474_cfb832a1 | ---
BugID: ACCUMULO-3474
Summary: ProxyServer ignores value of isDeleted on ColumnUpdate
Description: |-
The ProxyServer ignores the actual boolean value of the isDeleted flag on a ColumnUpdate: if the flag is set at all, then regardless of its value, the ProxyServer marks the update as a delete.
The ProxyServer should be updated to check the value of the flag.
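The Thrift-generated ColumnUpdate makes the distinction easy to see: setting the field at all flips isSetDeleteCell(), independent of the boolean value it was set to. A small sketch (field values are arbitrary):
{code}
import org.apache.accumulo.proxy.thrift.ColumnUpdate;

public class DeleteFlagSketch {
  public static void main(String[] args) {
    ColumnUpdate update = new ColumnUpdate();
    update.setColFamily("cf".getBytes());
    update.setColQualifier("cq".getBytes());
    update.setValue("v".getBytes());
    update.setDeleteCell(false); // explicitly NOT a delete

    // Prints "true false": the old code only checked isSetDeleteCell(), so this update
    // was turned into a putDelete instead of a put.
    System.out.println(update.isSetDeleteCell() + " " + update.isDeleteCell());
  }
}
{code}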
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 0fedb1d..f873010 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -1124,13 +1124,13 @@ public class ProxyServer implements AccumuloProxy.Iface {
if (update.isSetValue())
value = update.getValue();
if (update.isSetTimestamp()) {
- if (update.isSetDeleteCell()) {
+ if (update.isSetDeleteCell() && update.isDeleteCell()) {
m.putDelete(update.getColFamily(), update.getColQualifier(), viz, update.getTimestamp());
} else {
m.put(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz, update.getTimestamp(), new Value(value));
}
} else {
- if (update.isSetDeleteCell()) {
+ if (update.isSetDeleteCell() && update.isDeleteCell()) {
m.putDelete(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz);
} else {
m.put(new Text(update.getColFamily()), new Text(update.getColQualifier()), viz, new Value(value));
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3474_cfb832a1.diff |
bugs-dot-jar_data_ACCUMULO-795_9453bcfa | ---
BugID: ACCUMULO-795
Summary: MockTable doesn't obey useVersions parameter
Description: "The constructor for {{MockTable}} will call {{IteratorUtil.generateInitialTableProperties()}},
and thus set a versioning iterator on itself regardless of whether the useVersion
parameter is set to true or false. \n\nI believe {{MockTable}}'s constructor should
call IteratorUtil.generateInitialTableProperties() only if useVersions is true,
otherwise, it should populate {{settings}} with a new {{TreeMap}}"
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java
index c35d7fa..ea4f311 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java
@@ -190,11 +190,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableName.getBytes()), ByteBuffer.wrap(timeType.name().getBytes()));
- Map<String,String> opts;
- if (limitVersion) {
- opts = IteratorUtil.generateInitialTableProperties();
- } else
- opts = Collections.emptyMap();
+ Map<String,String> opts = IteratorUtil.generateInitialTableProperties(limitVersion);
try {
doTableOperation(TableOperation.CREATE, args, opts);
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
index f558822..9289608 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
@@ -88,9 +88,9 @@ public class MockTable {
private TimeType timeType;
SortedSet<Text> splits = new TreeSet<Text>();
- MockTable(boolean useVersions, TimeType timeType) {
+ MockTable(boolean limitVersion, TimeType timeType) {
this.timeType = timeType;
- settings = IteratorUtil.generateInitialTableProperties();
+ settings = IteratorUtil.generateInitialTableProperties(limitVersion);
for (Entry<String,String> entry : AccumuloConfiguration.getDefaultConfiguration()) {
String key = entry.getKey();
if (key.startsWith(Property.TABLE_PREFIX.getKey()))
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
index 172fa63..9b1ca69 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
@@ -63,13 +63,22 @@ public class IteratorUtil {
}
- public static Map<String,String> generateInitialTableProperties() {
+ /**
+ * Generate the initial (default) properties for a table
+ * @param limitVersion
+ * include a VersioningIterator at priority 20 that retains a single version of a given K/V pair.
+ * @return A map of Table properties
+ */
+ public static Map<String,String> generateInitialTableProperties(boolean limitVersion) {
TreeMap<String,String> props = new TreeMap<String,String>();
- for (IteratorScope iterScope : IteratorScope.values()) {
- props.put(Property.TABLE_ITERATOR_PREFIX + iterScope.name() + ".vers", "20," + VersioningIterator.class.getName());
- props.put(Property.TABLE_ITERATOR_PREFIX + iterScope.name() + ".vers.opt.maxVersions", "1");
+ if (limitVersion) {
+ for (IteratorScope iterScope : IteratorScope.values()) {
+ props.put(Property.TABLE_ITERATOR_PREFIX + iterScope.name() + ".vers", "20," + VersioningIterator.class.getName());
+ props.put(Property.TABLE_ITERATOR_PREFIX + iterScope.name() + ".vers.opt.maxVersions", "1");
+ }
}
+
return props;
}
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
index 83829a9..f2495cc 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/CreateTableCommand.java
@@ -110,7 +110,7 @@ public class CreateTableCommand extends Command {
// context
if (cl.hasOption(createTableNoDefaultIters.getOpt())) {
- for (String key : IteratorUtil.generateInitialTableProperties().keySet()) {
+ for (String key : IteratorUtil.generateInitialTableProperties(true).keySet()) {
shellState.getConnector().tableOperations().removeProperty(tableName, key);
}
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-795_9453bcfa.diff |
bugs-dot-jar_data_ACCUMULO-1986_adee0f12 | ---
BugID: ACCUMULO-1986
Summary: Validity checks missing for readFields and Thrift deserialization
Description: Classes in o.a.a.core.data (and potentially elsewhere) that support construction
from a Thrift object and/or population from a {{DataInput}} (via a {{readFields()}}
method) often lack data validity checks that the classes' constructors enforce.
The missing checks make it possible for an attacker to create invalid objects by
manipulating the bytes being read. The situation is analogous to the need to check
objects deserialized from their Java serialized form within the {{readObject()}}
method.
diff --git a/src/core/src/main/java/org/apache/accumulo/core/data/Key.java b/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
index cfb0b5c..b6cfad7 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/data/Key.java
@@ -291,6 +291,19 @@ public class Key implements WritableComparable<Key>, Cloneable {
this.colVisibility = toBytes(tkey.colVisibility);
this.timestamp = tkey.timestamp;
this.deleted = false;
+
+ if (row == null) {
+ throw new IllegalArgumentException("null row");
+ }
+ if (colFamily == null) {
+ throw new IllegalArgumentException("null column family");
+ }
+ if (colQualifier == null) {
+ throw new IllegalArgumentException("null column qualifier");
+ }
+ if (colVisibility == null) {
+ throw new IllegalArgumentException("null column visibility");
+ }
}
/**
diff --git a/src/core/src/main/java/org/apache/accumulo/core/data/Mutation.java b/src/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
index 3979da9..6b2c09f 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
@@ -187,6 +187,13 @@ public class Mutation implements Writable {
this.data = ByteBufferUtil.toBytes(tmutation.data);
this.entries = tmutation.entries;
this.values = ByteBufferUtil.toBytesList(tmutation.values);
+
+ if (this.row == null) {
+ throw new IllegalArgumentException("null row");
+ }
+ if (this.data == null) {
+ throw new IllegalArgumentException("null serialized data");
+ }
}
public Mutation(Mutation m) {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1986_adee0f12.diff |
bugs-dot-jar_data_ACCUMULO-776_dc9f23d9 | ---
BugID: ACCUMULO-776
Summary: TimestampFilter should serialize start and end as longs in the IteratorSetting
Description: "Although the TimestampFilter supports using longs to set the start or
end timestamp, it formats them as strings using SimpleDateFormat when storing or
retrieving them in the IteratorSetting.\n\nThis results in exceptions when the timestamps
being used can't survive a round trip through the _yyyyMMddHHmmssz_ format. For example, try {{setEnd(253402300800001,true)}}\n\nInstead,
{{setStart()}} and {{setEnd()}} could just as easily use {{String.valueOf(long i)}}
to store the values, and {{init()}} could retrieve them using {{Long.valueOf(String
s)}}. "
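A minimal demonstration of the lossy round trip through that pattern; the parser is pinned to GMT here for determinism and the 1500 ms value is arbitrary:
{code}
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

public class LossyTimestamp {
  public static void main(String[] args) throws Exception {
    SimpleDateFormat fmt = new SimpleDateFormat("yyyyMMddHHmmssz");
    fmt.setTimeZone(TimeZone.getTimeZone("GMT"));

    long end = 1500L; // 1970-01-01T00:00:01.500Z
    String stored = fmt.format(new Date(end));    // second granularity; milliseconds are gone
    long roundTrip = fmt.parse(stored).getTime();

    // Prints "1500 -> 1000": sub-second bounds silently change, and values the pattern
    // cannot handle at all (like the setEnd example above) fail with an exception.
    System.out.println(end + " -> " + roundTrip);
  }
}
{code}
Storing the raw long (the LONG-prefixed form in the patch) avoids both problems.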
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
index 2dbfe66..49f0146 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
@@ -17,8 +17,8 @@
package org.apache.accumulo.core.iterators.user;
import java.io.IOException;
+import java.text.ParseException;
import java.text.SimpleDateFormat;
-import java.util.Date;
import java.util.Map;
import java.util.TimeZone;
@@ -33,6 +33,7 @@ import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
* A Filter that matches entries whose timestamps fall within a range.
*/
public class TimestampFilter extends Filter {
+ private static final String LONG_PREFIX = "LONG";
private final SimpleDateFormat dateParser = initDateParser();
private static SimpleDateFormat initDateParser() {
@@ -86,10 +87,20 @@ public class TimestampFilter extends Filter {
throw new IllegalArgumentException("must have either start or end for " + TimestampFilter.class.getName());
try {
- if (hasStart)
- start = dateParser.parse(options.get(START)).getTime();
- if (hasEnd)
- end = dateParser.parse(options.get(END)).getTime();
+ if (hasStart) {
+ String s = options.get(START);
+ if (s.startsWith(LONG_PREFIX))
+ start = Long.valueOf(s.substring(LONG_PREFIX.length()));
+ else
+ start = dateParser.parse(s).getTime();
+ }
+ if (hasEnd) {
+ String s = options.get(END);
+ if (s.startsWith(LONG_PREFIX))
+ end = Long.valueOf(s.substring(LONG_PREFIX.length()));
+ else
+ end = dateParser.parse(s).getTime();
+ }
} catch (Exception e) {
throw new IllegalArgumentException(e);
}
@@ -116,8 +127,8 @@ public class TimestampFilter extends Filter {
IteratorOptions io = super.describeOptions();
io.setName("tsfilter");
io.setDescription("TimestampFilter displays entries with timestamps between specified values");
- io.addNamedOption("start", "start timestamp (yyyyMMddHHmmssz)");
- io.addNamedOption("end", "end timestamp (yyyyMMddHHmmssz)");
+ io.addNamedOption("start", "start timestamp (yyyyMMddHHmmssz or LONG<longstring>)");
+ io.addNamedOption("end", "end timestamp (yyyyMMddHHmmssz or LONG<longstring>)");
io.addNamedOption("startInclusive", "true or false");
io.addNamedOption("endInclusive", "true or false");
return io;
@@ -126,11 +137,27 @@ public class TimestampFilter extends Filter {
@Override
public boolean validateOptions(Map<String,String> options) {
super.validateOptions(options);
+ boolean hasStart = false;
+ boolean hasEnd = false;
try {
- if (options.containsKey(START))
- dateParser.parse(options.get(START));
- if (options.containsKey(END))
- dateParser.parse(options.get(END));
+ if (options.containsKey(START)) {
+ hasStart = true;
+ String s = options.get(START);
+ if (s.startsWith(LONG_PREFIX))
+ Long.valueOf(s.substring(LONG_PREFIX.length()));
+ else
+ dateParser.parse(s);
+ }
+ if (options.containsKey(END)) {
+ hasEnd = true;
+ String s = options.get(END);
+ if (s.startsWith(LONG_PREFIX))
+ Long.valueOf(s.substring(LONG_PREFIX.length()));
+ else
+ dateParser.parse(s);
+ }
+ if (!hasStart && !hasEnd)
+ return false;
if (options.get(START_INCL) != null)
Boolean.parseBoolean(options.get(START_INCL));
if (options.get(END_INCL) != null)
@@ -185,8 +212,13 @@ public class TimestampFilter extends Filter {
* boolean indicating whether the start is inclusive
*/
public static void setStart(IteratorSetting is, String start, boolean startInclusive) {
- is.addOption(START, start);
- is.addOption(START_INCL, Boolean.toString(startInclusive));
+ SimpleDateFormat dateParser = initDateParser();
+ try {
+ long startTS = dateParser.parse(start).getTime();
+ setStart(is, startTS, startInclusive);
+ } catch (ParseException e) {
+ throw new IllegalArgumentException("couldn't parse " + start);
+ }
}
/**
@@ -200,8 +232,13 @@ public class TimestampFilter extends Filter {
* boolean indicating whether the end is inclusive
*/
public static void setEnd(IteratorSetting is, String end, boolean endInclusive) {
- is.addOption(END, end);
- is.addOption(END_INCL, Boolean.toString(endInclusive));
+ SimpleDateFormat dateParser = initDateParser();
+ try {
+ long endTS = dateParser.parse(end).getTime();
+ setEnd(is, endTS, endInclusive);
+ } catch (ParseException e) {
+ throw new IllegalArgumentException("couldn't parse " + end);
+ }
}
/**
@@ -248,8 +285,7 @@ public class TimestampFilter extends Filter {
* boolean indicating whether the start is inclusive
*/
public static void setStart(IteratorSetting is, long start, boolean startInclusive) {
- SimpleDateFormat dateParser = initDateParser();
- is.addOption(START, dateParser.format(new Date(start)));
+ is.addOption(START, LONG_PREFIX + Long.toString(start));
is.addOption(START_INCL, Boolean.toString(startInclusive));
}
@@ -264,8 +300,7 @@ public class TimestampFilter extends Filter {
* boolean indicating whether the end is inclusive
*/
public static void setEnd(IteratorSetting is, long end, boolean endInclusive) {
- SimpleDateFormat dateParser = initDateParser();
- is.addOption(END, dateParser.format(new Date(end)));
+ is.addOption(END, LONG_PREFIX + Long.toString(end));
is.addOption(END_INCL, Boolean.toString(endInclusive));
}
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-776_dc9f23d9.diff |
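For context on the TimestampFilter change above, here is a minimal Java sketch of how the revised long-valued setters might be used from client code. It is an editorial illustration, not part of the dataset record; the priority, iterator name, and time window are arbitrary example values, and it assumes the stock Accumulo client API.

```java
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.iterators.user.TimestampFilter;

public class TimestampFilterExample {
  public static void main(String[] args) {
    // Configure a scan-time TimestampFilter for a one-hour window.
    IteratorSetting is = new IteratorSetting(30, "tsfilter", TimestampFilter.class);

    long end = System.currentTimeMillis();
    long start = end - 60L * 60L * 1000L;

    // After the patch, the long-valued setters serialize the bound with the
    // LONG<...> prefix instead of formatting it as a date string, so the
    // original millisecond value survives the round trip.
    TimestampFilter.setStart(is, start, true);
    TimestampFilter.setEnd(is, end, true);

    // The resulting setting would then be attached to a Scanner with
    // scanner.addScanIterator(is); here we just show the serialized options.
    System.out.println(is.getOptions());
  }
}
```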
bugs-dot-jar_data_ACCUMULO-3718_73ce9cfb | ---
BugID: ACCUMULO-3718
Summary: not possible to create a Mutation object from scala w/o some extra helper
code
Description: "issue: \n\nit's not possible to create a Mutation object from scala
without employing a standalone java jar wrapper. the preferred method for creating
the object has you do it in two stages: create with table row, then employ Mutation.put()
to populate the object with the actual mutation data. when you do this in scala,
you get a\n\njava.lang.IllegalStateException: Can not add to mutation after serializing
it at org.apache.accumulo.core.data.Mutation.put(Mutation.java:168) at org.apache.accumulo.core.data.Mutation.put(Mutation.java:163)
at org.apache.accumulo.core.data.Mutation.put(Mutation.java:211)\n\nerror. I *think*
this has something to do with the byte array going out of scope in Scala but somehow
not in Java. If you concat the operations (constuctor().put(data, data, ...) you
don't run into the error, but scala sees a Unit return type, so you can't actually
add the mutation to a BatchWriter. The only way I was able to get around this was
to create a stand-alone jar with a method that created then returned a populated
mutation object. \n\nI wasn't sure whether or not to call this a bug or an enhancement.
given that you probably want Accumulo to play nice with Scala I decided to call
it a bug. \n\nbelow is a link to the stack overflow thread I created whilst figuring
all this out: \n\nhttp://stackoverflow.com/questions/29497547/odd-error-when-populating-accumulo-1-6-mutation-object-via-spark-notebook/29527189#29527189\n\n\n"
diff --git a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
index 0861cc4..81ad531 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/Mutation.java
@@ -191,6 +191,20 @@ public class Mutation implements Writable {
}
}
+ /* This is so hashCode & equals can be called without changing this object.
+ *
+ * It will return a copy of the current data buffer if serialized has not been
+ * called previously. Otherwise, this.data will be returned since the buffer is
+ * null and will not change.
+ */
+ private byte[] serializedSnapshot() {
+ if (buffer != null) {
+ return buffer.toArray();
+ } else {
+ return this.data;
+ }
+ }
+
/**
* @since 1.5.0
*/
@@ -691,13 +705,13 @@ public class Mutation implements Writable {
@Override
public int hashCode() {
- return toThrift().hashCode();
+ return toThrift(false).hashCode();
}
public boolean equals(Mutation m) {
- serialize();
- m.serialize();
- if (Arrays.equals(row, m.row) && entries == m.entries && Arrays.equals(data, m.data)) {
+ byte[] myData = serializedSnapshot();
+ byte[] otherData = m.serializedSnapshot();
+ if (Arrays.equals(row, m.row) && entries == m.entries && Arrays.equals(myData, otherData)) {
if (values == null && m.values == null)
return true;
@@ -716,7 +730,17 @@ public class Mutation implements Writable {
}
public TMutation toThrift() {
- serialize();
+ return toThrift(true);
+ }
+
+ private TMutation toThrift(boolean serialize) {
+ byte[] data;
+ if (serialize) {
+ this.serialize();
+ data = this.data;
+ } else {
+ data = serializedSnapshot();
+ }
return new TMutation(java.nio.ByteBuffer.wrap(row), java.nio.ByteBuffer.wrap(data), ByteBufferUtil.toByteBuffers(values), entries);
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-3718_73ce9cfb.diff |
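A hedged Java sketch of the behavior the ACCUMULO-3718 patch above enables: hashCode() and equals() now operate on a serialized snapshot, so calling them no longer locks the mutation against further put() calls, which is the failure mode the Scala reporter hit. The row, family, and qualifier names are made up for illustration, and this is not part of the original record.

```java
import java.nio.charset.StandardCharsets;

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

public class MutationSnapshotExample {
  public static void main(String[] args) {
    Mutation m = new Mutation(new Text("row1"));
    m.put(new Text("cf"), new Text("cq1"), new Value("v1".getBytes(StandardCharsets.UTF_8)));

    // Before the patch, hashCode()/equals() called serialize(), after which any
    // further put() threw IllegalStateException ("Can not add to mutation after
    // serializing it"). With serializedSnapshot() they work on a copy instead.
    int h = m.hashCode();

    // ...so the mutation can still be extended afterwards.
    m.put(new Text("cf"), new Text("cq2"), new Value("v2".getBytes(StandardCharsets.UTF_8)));

    System.out.println("hash=" + h + " updates=" + m.size());
  }
}
```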
bugs-dot-jar_data_ACCUMULO-1730_872b6db3 | ---
BugID: ACCUMULO-1730
Summary: ColumnVisibility parse tree nodes do not have correct location offsets for
AND and OR nodes
Description: |-
Trying to do some transformations on visibility strings and running into issues working with the parse tree:
Clojure 1.5.1
user=> (import [org.apache.accumulo.core.security ColumnVisibility])
org.apache.accumulo.core.security.ColumnVisibility
user=> (def vis (ColumnVisibility. "(W)|(U|V)"))
#'user/vis
user=> (.getTermStart (first (.getChildren (.getParseTree vis))))
1
user=> (.getTermEnd (first (.getChildren (.getParseTree vis))))
2
user=> (.getTermStart (second (.getChildren (.getParseTree vis))))
0
user=> (.getTermEnd (second (.getChildren (.getParseTree vis))))
8
Shouldn't those last two be 5 and 8?
diff --git a/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java b/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
index 55763bc..f9c8382 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
@@ -302,6 +302,7 @@ public class ColumnVisibility {
result.add(c);
else
result.add(child);
+ result.end = index - 1;
return result;
}
case '"': {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1730_872b6db3.diff |
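A small Java sketch (an editorial illustration, not part of the record) that reproduces the inspection from the ACCUMULO-1730 report without Clojure. With the one-line fix above, the second child's term offsets should cover only the nested sub-expression, as the reporter expected (5 and 8), rather than the whole string.

```java
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.core.security.ColumnVisibility.Node;

public class VisibilityOffsetsExample {
  public static void main(String[] args) {
    ColumnVisibility vis = new ColumnVisibility("(W)|(U|V)");
    Node root = vis.getParseTree();

    // Print the term offsets of each child of the root OR node. The reporter
    // expects the nested "(U|V)" child to report 5 and 8 after the fix.
    for (Node child : root.getChildren()) {
      System.out.println(child.getType() + " start=" + child.getTermStart() + " end=" + child.getTermEnd());
    }
  }
}
```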
bugs-dot-jar_data_ACCUMULO-1044_9396979b | ---
BugID: ACCUMULO-1044
Summary: bulk imported files showing up in metadata after bulk import fails
Description: |
Bulk import fails. The file is moved to the failures directory.
But references in the !METADATA table remain.
diff --git a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index bd19d1f..463b7b0 100644
--- a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.zookeeper.ZooUtil;
import org.apache.accumulo.server.client.HdfsZooInstance;
import org.apache.accumulo.server.zookeeper.ZooCache;
import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
@@ -72,6 +73,22 @@ public class MetadataConstraints implements Constraint {
return false;
}
+ static private ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
+ if (lst == null)
+ lst = new ArrayList<Short>();
+ lst.add((short)violation);
+ return lst;
+ }
+
+ static private ArrayList<Short> addIfNotPresent(ArrayList<Short> lst, int intViolation) {
+ if (lst == null)
+ return addViolation(lst, intViolation);
+ short violation = (short)intViolation;
+ if (!lst.contains(violation))
+ return addViolation(lst, intViolation);
+ return lst;
+ }
+
public List<Short> check(Environment env, Mutation mutation) {
ArrayList<Short> violations = null;
@@ -96,44 +113,30 @@ public class MetadataConstraints implements Constraint {
break;
if (!validTableNameChars[0xff & b]) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
if (!containsSemiC) {
// see if last row char is <
if (row.length == 0 || row[row.length - 1] != '<') {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
} else {
if (row.length == 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
if (row.length > 0 && row[0] == '!') {
if (row.length < 3 || row[1] != '0' || (row[2] != '<' && row[2] != ';')) {
- if (violations == null)
- violations = new ArrayList<Short>();
- if (!violations.contains((short) 4))
- violations.add((short) 4);
+ violations = addIfNotPresent(violations, 4);
}
}
// ensure row is not less than Constants.METADATA_TABLE_ID
if (new Text(row).compareTo(new Text(Constants.METADATA_TABLE_ID)) < 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 5);
+ violations = addViolation(violations, 5);
}
for (ColumnUpdate columnUpdate : colUpdates) {
@@ -141,17 +144,13 @@ public class MetadataConstraints implements Constraint {
if (columnUpdate.isDeleted()) {
if (!isValidColumn(columnUpdate)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 2);
+ violations = addViolation(violations, 2);
}
continue;
}
if (columnUpdate.getValue().length == 0 && !columnFamily.equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 6);
+ violations = addViolation(violations, 6);
}
if (columnFamily.equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
@@ -159,26 +158,49 @@ public class MetadataConstraints implements Constraint {
DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
if (dfv.getSize() < 0 || dfv.getNumEntries() < 0) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
}
} catch (NumberFormatException nfe) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
} catch (ArrayIndexOutOfBoundsException aiooe) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 1);
+ violations = addViolation(violations, 1);
}
} else if (columnFamily.equals(Constants.METADATA_SCANFILE_COLUMN_FAMILY)) {
+ } else if (columnFamily.equals(Constants.METADATA_BULKFILE_COLUMN_FAMILY)) {
+ if (!columnUpdate.isDeleted()) {
+ // splits, which also write the time reference, are allowed to write this reference even when
+ // the transaction is not running because the other half of the tablet is holding a reference
+ // to the file.
+ boolean isSplitMutation = false;
+ // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
+ // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
+ // See ACCUMULO-1230.
+ boolean isLocationMutation = false;
+ for (ColumnUpdate update : mutation.getUpdates()) {
+ if (new ColumnFQ(update).equals(Constants.METADATA_TIME_COLUMN)) {
+ isSplitMutation = true;
+ }
+ if (update.getColumnFamily().equals(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY)) {
+ isLocationMutation = true;
+ }
+ }
+
+ if (!isSplitMutation && !isLocationMutation) {
+ String tidString = new String(columnUpdate.getValue());
+ long tid = Long.parseLong(tidString);
+ try {
+ if (!new ZooArbitrator().transactionAlive(Constants.BULK_ARBITRATOR_TYPE, tid)) {
+ violations = addViolation(violations, 8);
+ }
+ } catch (Exception ex) {
+ violations = addViolation(violations, 8);
+ }
+ }
+ }
} else {
if (!isValidColumn(columnUpdate)) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 2);
+ violations = addViolation(violations, 2);
} else if (new ColumnFQ(columnUpdate).equals(Constants.METADATA_PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
&& (violations == null || !violations.contains((short) 4))) {
KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
@@ -188,9 +210,7 @@ public class MetadataConstraints implements Constraint {
boolean prevEndRowLessThanEndRow = per == null || ke.getEndRow() == null || per.compareTo(ke.getEndRow()) < 0;
if (!prevEndRowLessThanEndRow) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 3);
+ violations = addViolation(violations, 3);
}
} else if (new ColumnFQ(columnUpdate).equals(Constants.METADATA_LOCK_COLUMN)) {
if (zooCache == null) {
@@ -211,9 +231,7 @@ public class MetadataConstraints implements Constraint {
}
if (!lockHeld) {
- if (violations == null)
- violations = new ArrayList<Short>();
- violations.add((short) 7);
+ violations = addViolation(violations, 7);
}
}
@@ -221,7 +239,10 @@ public class MetadataConstraints implements Constraint {
}
if (violations != null) {
- log.debug(" violating metadata mutation : " + mutation);
+ log.debug("violating metadata mutation : " + new String(mutation.getRow()));
+ for (ColumnUpdate update : mutation.getUpdates()) {
+ log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value " + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
+ }
}
return violations;
@@ -243,6 +264,8 @@ public class MetadataConstraints implements Constraint {
return "Empty values are not allowed for any " + Constants.METADATA_TABLE_NAME + " column";
case 7:
return "Lock not held in zookeeper by writer";
+ case 8:
+ return "Bulk load transaction no longer running";
}
return null;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1044_9396979b.diff |
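Most of the ACCUMULO-1044 diff above is a mechanical refactoring around two small helpers. Below is a standalone sketch of that pattern, useful when reading the rest of the hunk; the method names mirror the patch, while the violation codes used in main are arbitrary example values.

```java
import java.util.ArrayList;

public class LazyViolationList {
  // Mirrors the helpers introduced by the patch: the list is only allocated
  // once the first violation is recorded, and duplicate codes are suppressed.
  static ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
    if (lst == null)
      lst = new ArrayList<Short>();
    lst.add((short) violation);
    return lst;
  }

  static ArrayList<Short> addIfNotPresent(ArrayList<Short> lst, int violation) {
    if (lst == null || !lst.contains((short) violation))
      return addViolation(lst, violation);
    return lst;
  }

  public static void main(String[] args) {
    ArrayList<Short> violations = null;          // nothing allocated yet
    violations = addIfNotPresent(violations, 4); // [4]
    violations = addIfNotPresent(violations, 4); // still [4], no duplicate
    violations = addViolation(violations, 8);    // [4, 8]
    System.out.println(violations);
  }
}
```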
bugs-dot-jar_data_ACCUMULO-1358_6c565dfb | ---
BugID: ACCUMULO-1358
Summary: Shell's setiter is not informative when using a bad class name
Description: In the shell, I did setiter using a class that wasn't found. Rather then
a message about it not being found, I just get told that I have an invalid argument.
Even turning on debug, I had to use the stack trace to figure out why it was erroring.
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java
index 4c6d2d2..26e38e6 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/SetIterCommand.java
@@ -175,14 +175,23 @@ public class SetIterCommand extends Command {
clazz = classloader.loadClass(className).asSubclass(OptionDescriber.class);
skvi = clazz.newInstance();
} catch (ClassNotFoundException e) {
- throw new IllegalArgumentException(e.getMessage());
+ StringBuilder msg = new StringBuilder("Unable to load ").append(className);
+ if (className.indexOf('.') < 0) {
+ msg.append("; did you use a fully qualified package name?");
+ } else {
+ msg.append("; class not found.");
+ }
+ throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE, msg.toString());
} catch (InstantiationException e) {
throw new IllegalArgumentException(e.getMessage());
} catch (IllegalAccessException e) {
throw new IllegalArgumentException(e.getMessage());
} catch (ClassCastException e) {
- throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE, "Unable to load " + className + " as type " + OptionDescriber.class.getName()
- + "; configure with 'config' instead");
+ StringBuilder msg = new StringBuilder("Loaded ");
+ msg.append(className).append(" but it does not implement ");
+ msg.append(OptionDescriber.class.getSimpleName());
+ msg.append("; use 'config -s' instead.");
+ throw new ShellCommandException(ErrorCode.INITIALIZATION_FAILURE, msg.toString());
}
final IteratorOptions itopts = skvi.describeOptions();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1358_6c565dfb.diff |
bugs-dot-jar_data_ACCUMULO-217_46f62443 | ---
BugID: ACCUMULO-217
Summary: MockAccumulo doesn't throw informative errors
Description: Users are unable to tell if an error has occurred and whether it is due
to unimplemented features in MockAccumulo.
diff --git a/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java b/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java
index 96a31e7..31f7405 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsHelper.java
@@ -48,6 +48,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public void removeIterator(String tableName, String name, EnumSet<IteratorScope> scopes) throws AccumuloSecurityException, AccumuloException,
TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
Map<String,String> copy = new TreeMap<String,String>();
for (Entry<String,String> property : this.getProperties(tableName)) {
copy.put(property.getKey(), property.getValue());
@@ -64,6 +66,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public IteratorSetting getIteratorSetting(String tableName, String name, IteratorScope scope) throws AccumuloSecurityException, AccumuloException,
TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
int priority = -1;
String classname = null;
Map<String,String> settings = new HashMap<String,String>();
@@ -90,6 +94,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public Set<String> listIterators(String tableName) throws AccumuloSecurityException, AccumuloException, TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
Set<String> result = new HashSet<String>();
Set<String> lifecycles = new HashSet<String>();
for (IteratorScope scope : IteratorScope.values())
@@ -107,6 +113,8 @@ public abstract class TableOperationsHelper implements TableOperations {
@Override
public void checkIteratorConflicts(String tableName, IteratorSetting setting) throws AccumuloException, TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(null, tableName, null);
for (IteratorScope scope : setting.getScopes()) {
String scopeStr = String.format("%s%s", Property.TABLE_ITERATOR_PREFIX, scope.name().toLowerCase());
String nameStr = String.format("%s.%s", scopeStr, setting.getName());
diff --git a/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java b/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
index 4063b76..1b2a3d0 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
@@ -81,6 +81,8 @@ public class MockTableOperations extends TableOperationsHelper {
if (!tableName.matches(Constants.VALID_TABLE_NAME_REGEX)) {
throw new IllegalArgumentException();
}
+ if (exists(tableName))
+ throw new TableExistsException(tableName, tableName, "");
acu.createTable(username, tableName, versioningIter, timeType);
}
@@ -90,30 +92,42 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public void addAggregators(String tableName, List<? extends PerColumnIteratorConfig> aggregators) throws AccumuloSecurityException, TableNotFoundException,
AccumuloException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
acu.addAggregators(tableName, aggregators);
}
@Override
- public void addSplits(String tableName, SortedSet<Text> partitionKeys) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {}
+ public void addSplits(String tableName, SortedSet<Text> partitionKeys) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+ throw new NotImplementedException();
+ }
@Override
- public Collection<Text> getSplits(String tableName) {
+ public Collection<Text> getSplits(String tableName) throws TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
return Collections.emptyList();
}
@Override
- public Collection<Text> getSplits(String tableName, int maxSplits) {
- return Collections.emptyList();
+ public Collection<Text> getSplits(String tableName, int maxSplits) throws TableNotFoundException {
+ return getSplits(tableName);
}
@Override
public void delete(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
acu.tables.remove(tableName);
}
@Override
public void rename(String oldTableName, String newTableName) throws AccumuloSecurityException, TableNotFoundException, AccumuloException,
TableExistsException {
+ if (!exists(oldTableName))
+ throw new TableNotFoundException(oldTableName, oldTableName, "");
+ if (exists(newTableName))
+ throw new TableExistsException(newTableName, newTableName, "");
MockTable t = acu.tables.remove(oldTableName);
acu.tables.put(newTableName, t);
}
@@ -133,15 +147,19 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public Iterable<Entry<String,String>> getProperties(String tableName) throws TableNotFoundException {
+ if (!exists(tableName))
+ throw new TableNotFoundException(tableName, tableName, "");
return acu.tables.get(tableName).settings.entrySet();
}
@Override
- public void setLocalityGroups(String tableName, Map<String,Set<Text>> groups) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {}
+ public void setLocalityGroups(String tableName, Map<String,Set<Text>> groups) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ throw new NotImplementedException();
+ }
@Override
public Map<String,Set<Text>> getLocalityGroups(String tableName) throws AccumuloException, TableNotFoundException {
- return null;
+ throw new NotImplementedException();
}
@Override
@@ -163,13 +181,17 @@ public class MockTableOperations extends TableOperationsHelper {
}
@Override
- public void offline(String tableName) throws AccumuloSecurityException, AccumuloException {}
+ public void offline(String tableName) throws AccumuloSecurityException, AccumuloException {
+ throw new NotImplementedException();
+ }
@Override
public void online(String tableName) throws AccumuloSecurityException, AccumuloException {}
@Override
- public void clearLocatorCache(String tableName) throws TableNotFoundException {}
+ public void clearLocatorCache(String tableName) throws TableNotFoundException {
+ throw new NotImplementedException();
+ }
@Override
public Map<String,String> tableIdMap() {
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-217_46f62443.diff |
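A distilled, hypothetical sketch of the fail-fast pattern the ACCUMULO-217 patch applies throughout the mock and helper classes. The class and exception below are stand-ins rather than the real Accumulo types, but the control flow matches the added guards.

```java
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class FailFastGuardSketch {
  /** Hypothetical stand-in for the checked exception thrown by the real API. */
  static class TableNotFoundException extends Exception {
    TableNotFoundException(String table) {
      super("table " + table + " does not exist");
    }
  }

  private final Map<String, Set<String>> iteratorsByTable = new TreeMap<String, Set<String>>();

  boolean exists(String tableName) {
    return iteratorsByTable.containsKey(tableName);
  }

  // Before the patch, mock methods like this silently did nothing for a missing
  // table; after it, they fail fast the way the real server-backed client does.
  Set<String> listIterators(String tableName) throws TableNotFoundException {
    if (!exists(tableName))
      throw new TableNotFoundException(tableName);
    return iteratorsByTable.get(tableName);
  }

  public static void main(String[] args) {
    try {
      new FailFastGuardSketch().listIterators("no_such_table");
    } catch (TableNotFoundException e) {
      System.out.println("caught: " + e.getMessage());
    }
  }
}
```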
bugs-dot-jar_data_ACCUMULO-1183_cfbf5999 | ---
BugID: ACCUMULO-1183
Summary: ProxyServer does not set column information on BatchScanner
Description: The createScanner method uses the options from the thrift request to
call fetchColumn() and fetchColumnFamily(). The createBatchScanner should be doing
have the same feature, though the statements are absent from the code.
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
index 911d187..167cecc 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/ProxyServer.java
@@ -819,7 +819,17 @@ public class ProxyServer implements AccumuloProxy.Iface {
}
}
scanner.setRanges(ranges);
+
+ if (opts.columns != null) {
+ for (ScanColumn col : opts.columns) {
+ if (col.isSetColQualifier())
+ scanner.fetchColumn(ByteBufferUtil.toText(col.colFamily), ByteBufferUtil.toText(col.colQualifier));
+ else
+ scanner.fetchColumnFamily(ByteBufferUtil.toText(col.colFamily));
+ }
+ }
}
+
UUID uuid = UUID.randomUUID();
ScannerPlusIterator spi = new ScannerPlusIterator();
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-1183_cfbf5999.diff |
bugs-dot-jar_data_ACCUMULO-907_4aeaeb2a | ---
BugID: ACCUMULO-907
Summary: stacking combiners produces a strange result
Description: |+
Paste the following into your shell:
{noformat}
deletetable test
createtable test
setiter -t test -p 16 -scan -n test_1 -class org.apache.accumulo.core.iterators.user.SummingCombiner
count:a
STRING
setiter -t test -p 17 -scan -n test_2 -class org.apache.accumulo.core.iterators.user.SummingCombiner
count:a
STRING
setiter -t test -p 18 -scan -n test_3 -class org.apache.accumulo.core.iterators.user.SummingCombiner
count:a
STRING
setiter -t test -p 10 -scan -n test_4 -class org.apache.accumulo.core.iterators.user.SummingCombiner
count
STRING
insert row count a 1
insert row count a 1
insert row count b 1
insert row count b 1
insert row count b 1
insert row count c 1
scan
{noformat}
I expect:
{noformat}
row count:a [] 2
row count:b [] 3
row count:c [] 1
{noformat}
But instead, I get this:
{noformat}
row count:a [] 12
{noformat}
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
index 6e72073..584eb14 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
@@ -63,7 +63,7 @@ public abstract class Combiner extends WrappingIterator implements OptionDescrib
*/
public ValueIterator(SortedKeyValueIterator<Key,Value> source) {
this.source = source;
- topKey = source.getTopKey();
+ topKey = new Key(source.getTopKey());
hasNext = _hasNext();
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-907_4aeaeb2a.diff |
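A possible Java-API equivalent of the shell repro in the ACCUMULO-907 record, shown as a hedged sketch: the priorities and iterator names are arbitrary, attaching the settings to a Scanner is left out, and the root-cause comment reflects what the one-line fix above implies rather than the project's own analysis.

```java
import java.util.Collections;

import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.iterators.Combiner;
import org.apache.accumulo.core.iterators.LongCombiner;
import org.apache.accumulo.core.iterators.user.SummingCombiner;

public class StackedCombinerSetup {
  public static void main(String[] args) {
    // Two scan-time SummingCombiners on the count:a column at different priorities.
    IteratorSetting first = new IteratorSetting(16, "test_1", SummingCombiner.class);
    Combiner.setColumns(first, Collections.singletonList(new IteratorSetting.Column("count", "a")));
    LongCombiner.setEncodingType(first, LongCombiner.Type.STRING);

    IteratorSetting second = new IteratorSetting(17, "test_2", SummingCombiner.class);
    Combiner.setColumns(second, Collections.singletonList(new IteratorSetting.Column("count", "a")));
    LongCombiner.setEncodingType(second, LongCombiner.Type.STRING);

    // Each setting would be attached with scanner.addScanIterator(...). The bug
    // itself was in the combiner: ValueIterator kept a live reference to the
    // source's top key instead of a copy, so when the key object changed
    // underneath it, entries were folded together -- which appears to be why
    // the shell repro collapses everything into a single count:a value.
    System.out.println(first.getOptions());
    System.out.println(second.getOptions());
  }
}
```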
bugs-dot-jar_data_ACCUMULO-412_be2fdba7 | ---
BugID: ACCUMULO-412
Summary: importdirectory failing on split table
Description: 'bulk import for the wikisearch example isn''t working properly: files
are not being assigned to partitions if there are splits.'
diff --git a/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 4f95e1a..83283ac 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -65,7 +65,6 @@ public enum Property {
MASTER_RECOVERY_POOL("master.recovery.pool", "recovery", PropertyType.STRING, "Priority queue to use for log recovery map/reduce jobs."),
MASTER_RECOVERY_SORT_MAPREDUCE("master.recovery.sort.mapreduce", "false", PropertyType.BOOLEAN,
"If true, use map/reduce to sort write-ahead logs during recovery"),
- MASTER_BULK_SERVERS("master.bulk.server.max", "4", PropertyType.COUNT, "The number of servers to use during a bulk load"),
MASTER_BULK_RETRIES("master.bulk.retries", "3", PropertyType.COUNT, "The number of attempts to bulk-load a file before giving up."),
MASTER_BULK_THREADPOOL_SIZE("master.bulk.threadpool.size", "5", PropertyType.COUNT, "The number of threads to use when coordinating a bulk-import."),
MASTER_MINTHREADS("master.server.threads.minimum", "2", PropertyType.COUNT, "The minimum number of threads to use to handle incoming requests."),
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
index 94daf03..a9ed76c 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
@@ -69,7 +69,7 @@ public abstract class Filter extends WrappingIterator implements OptionDescriber
* Iterates over the source until an acceptable key/value pair is found.
*/
protected void findTop() {
- while (getSource().hasTop() && (negate == accept(getSource().getTopKey(), getSource().getTopValue()))) {
+ while (getSource().hasTop() && !getSource().getTopKey().isDeleted() && (negate == accept(getSource().getTopKey(), getSource().getTopValue()))) {
try {
getSource().next();
} catch (IOException e) {
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
index 8bbf18a..edeaa1d 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/SortedKeyValueIterator.java
@@ -59,7 +59,9 @@ public interface SortedKeyValueIterator<K extends WritableComparable<?>,V extend
boolean hasTop();
/**
- * Advances to the next K,V pair.
+ * Advances to the next K,V pair. Note that in minor compaction scope and in non-full major compaction scopes the iterator may see deletion entries. These
+ * entries should be preserved by all iterators except ones that are strictly scan-time iterators that will never be configured for the minc or majc scopes.
+ * Deletion entries are only removed during full major compactions.
*
* @throws IOException
* if an I/O error occurs.
@@ -88,7 +90,9 @@ public interface SortedKeyValueIterator<K extends WritableComparable<?>,V extend
void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException;
/**
- * Returns top key. Can be called 0 or more times without affecting behavior of next() or hasTop().
+ * Returns top key. Can be called 0 or more times without affecting behavior of next() or hasTop(). Note that in minor compaction scope and in non-full major
+ * compaction scopes the iterator may see deletion entries. These entries should be preserved by all iterators except ones that are strictly scan-time
+ * iterators that will never be configured for the minc or majc scopes. Deletion entries are only removed during full major compactions.
*
* @return <tt>K</tt>
* @exception IllegalStateException
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
index 5e82a7d..bb4ae64 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitionedMapper.java
@@ -42,14 +42,13 @@ import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.log4j.Logger;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
public class WikipediaPartitionedMapper extends Mapper<Text,Article,Text,Mutation> {
- private static final Logger log = Logger.getLogger(WikipediaPartitionedMapper.class);
+ // private static final Logger log = Logger.getLogger(WikipediaPartitionedMapper.class);
public final static Charset UTF8 = Charset.forName("UTF-8");
public static final String DOCUMENT_COLUMN_FAMILY = "d";
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
index 82af9fd..3507108 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/ingest/WikipediaPartitioner.java
@@ -23,40 +23,21 @@ package org.apache.accumulo.examples.wikisearch.ingest;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
-import java.io.StringReader;
import java.nio.charset.Charset;
-import java.util.HashSet;
-import java.util.IllegalFormatException;
-import java.util.Map.Entry;
-import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.examples.wikisearch.ingest.ArticleExtractor.Article;
import org.apache.accumulo.examples.wikisearch.ingest.WikipediaInputFormat.WikipediaInputSplit;
-import org.apache.accumulo.examples.wikisearch.normalizer.LcNoDiacriticsNormalizer;
-import org.apache.accumulo.examples.wikisearch.protobuf.Uid;
-import org.apache.accumulo.examples.wikisearch.protobuf.Uid.List.Builder;
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
-import org.apache.log4j.Logger;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.wikipedia.analysis.WikipediaTokenizer;
-
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
public class WikipediaPartitioner extends Mapper<LongWritable,Text,Text,Article> {
- private static final Logger log = Logger.getLogger(WikipediaPartitioner.class);
+ // private static final Logger log = Logger.getLogger(WikipediaPartitioner.class);
public final static Charset UTF8 = Charset.forName("UTF-8");
public static final String DOCUMENT_COLUMN_FAMILY = "d";
diff --git a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java
index d8c57c2..2738e2c 100644
--- a/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java
+++ b/src/examples/wikisearch/ingest/src/main/java/org/apache/accumulo/examples/wikisearch/output/SortingRFileOutputFormat.java
@@ -4,20 +4,18 @@ import java.io.IOException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.examples.wikisearch.ingest.WikipediaMapper;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
public class SortingRFileOutputFormat extends OutputFormat<Text,Mutation> {
- private static final Logger log = Logger.getLogger(SortingRFileOutputFormat.class);
+ // private static final Logger log = Logger.getLogger(SortingRFileOutputFormat.class);
public static final String PATH_NAME = "sortingrfileoutputformat.path";
public static final String MAX_BUFFER_SIZE = "sortingrfileoutputformat.max.buffer.size";
diff --git a/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java b/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
index 071b8bd..4ee5371 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
@@ -38,8 +38,8 @@ import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.impl.ServerClient;
import org.apache.accumulo.core.client.impl.TabletLocator;
-import org.apache.accumulo.core.client.impl.Translator;
import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocation;
+import org.apache.accumulo.core.client.impl.Translator;
import org.apache.accumulo.core.client.impl.thrift.ClientService;
import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -150,7 +150,7 @@ public class BulkImporter {
} catch (Exception ex) {
log.warn("Unable to find tablets that overlap file " + mapFile.toString());
}
-
+ log.debug("Map file " + mapFile + " found to overlap " + tabletsToAssignMapFileTo.size() + " tablets");
if (tabletsToAssignMapFileTo.size() == 0) {
List<KeyExtent> empty = Collections.emptyList();
completeFailures.put(mapFile, empty);
@@ -652,33 +652,41 @@ public class BulkImporter {
return findOverlappingTablets(acuConf, fs, locator, file, start, failed.getEndRow());
}
+ final static byte[] byte0 = {0};
+
public static List<TabletLocation> findOverlappingTablets(AccumuloConfiguration acuConf, FileSystem fs, TabletLocator locator, Path file, Text startRow,
Text endRow) throws Exception {
List<TabletLocation> result = new ArrayList<TabletLocation>();
-
Collection<ByteSequence> columnFamilies = Collections.emptyList();
-
- FileSKVIterator reader = FileOperations.getInstance().openReader(file.toString(), true, fs, fs.getConf(), acuConf);
+ String filename = file.toString();
+ // log.debug(filename + " finding overlapping tablets " + startRow + " -> " + endRow);
+ FileSKVIterator reader = FileOperations.getInstance().openReader(filename, true, fs, fs.getConf(), acuConf);
try {
Text row = startRow;
if (row == null)
row = new Text();
while (true) {
+ // log.debug(filename + " Seeking to row " + row);
reader.seek(new Range(row, null), columnFamilies, false);
- if (!reader.hasTop())
+ if (!reader.hasTop()) {
+ // log.debug(filename + " not found");
break;
+ }
row = reader.getTopKey().getRow();
TabletLocation tabletLocation = locator.locateTablet(row, false, true);
+ // log.debug(filename + " found row " + row + " at location " + tabletLocation);
result.add(tabletLocation);
row = tabletLocation.tablet_extent.getEndRow();
- if (row != null && (endRow == null || row.compareTo(endRow) < 0))
- row = Range.followingPrefix(row);
- else
+ if (row != null && (endRow == null || row.compareTo(endRow) < 0)) {
+ row = new Text(row);
+ row.append(byte0, 0, byte0.length);
+ } else
break;
}
} finally {
reader.close();
}
+ // log.debug(filename + " to be sent to " + result);
return result;
}
diff --git a/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java b/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
index c4a3f50..05c353d 100644
--- a/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
+++ b/src/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
@@ -19,11 +19,15 @@ package org.apache.accumulo.server.master.tableOps;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
+import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
@@ -41,12 +45,13 @@ import org.apache.accumulo.core.client.impl.thrift.TableOperation;
import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.conf.SiteConfiguration;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.master.state.tables.TableState;
import org.apache.accumulo.core.security.thrift.AuthInfo;
import org.apache.accumulo.core.util.CachedConfiguration;
import org.apache.accumulo.core.util.Daemon;
-import org.apache.accumulo.core.util.LoggingRunnable;
+import org.apache.accumulo.core.util.ThriftUtil;
import org.apache.accumulo.core.util.UtilWaitThread;
import org.apache.accumulo.server.ServerConstants;
import org.apache.accumulo.server.client.HdfsZooInstance;
@@ -370,7 +375,7 @@ class LoadFiles extends MasterRepo {
@Override
public Repo<Master> call(final long tid, Master master) throws Exception {
-
+ final SiteConfiguration conf = ServerConfiguration.getSiteConfiguration();
FileSystem fs = TraceFileSystem.wrap(org.apache.accumulo.core.file.FileUtil.getFileSystem(CachedConfiguration.getInstance(),
ServerConfiguration.getSiteConfiguration()));
List<FileStatus> files = new ArrayList<FileStatus>();
@@ -389,42 +394,68 @@ class LoadFiles extends MasterRepo {
}
fs.delete(writable, false);
- // group files into N-sized chunks, send the chunks to random servers
- final int SERVERS_TO_USE = Math.min(ServerConfiguration.getSystemConfiguration().getCount(Property.MASTER_BULK_SERVERS), master.onlineTabletServers()
- .size());
-
- log.debug("tid " + tid + " using " + SERVERS_TO_USE + " servers");
- // wait for success, repeat failures R times
final List<String> filesToLoad = Collections.synchronizedList(new ArrayList<String>());
for (FileStatus f : files)
filesToLoad.add(f.getPath().toString());
- final int RETRIES = Math.max(1, ServerConfiguration.getSystemConfiguration().getCount(Property.MASTER_BULK_RETRIES));
- for (int i = 0; i < RETRIES && filesToLoad.size() > 0; i++) {
- List<Future<?>> results = new ArrayList<Future<?>>();
- for (List<String> chunk : groupFiles(filesToLoad, SERVERS_TO_USE)) {
- final List<String> attempt = chunk;
- results.add(threadPool.submit(new LoggingRunnable(log, new Runnable() {
+
+ final int RETRIES = Math.max(1, conf.getCount(Property.MASTER_BULK_RETRIES));
+ for (int attempt = 0; attempt < RETRIES && filesToLoad.size() > 0; attempt++) {
+ List<Future<List<String>>> results = new ArrayList<Future<List<String>>>();
+
+ // Figure out which files will be sent to which server
+ Set<TServerInstance> currentServers = Collections.synchronizedSet(new HashSet<TServerInstance>(master.onlineTabletServers()));
+ Map<String,List<String>> loadAssignments = new HashMap<String,List<String>>();
+ for (TServerInstance server : currentServers) {
+ loadAssignments.put(server.hostPort(), new ArrayList<String>());
+ }
+ int i = 0;
+ List<Entry<String,List<String>>> entries = new ArrayList<Entry<String,List<String>>>(loadAssignments.entrySet());
+ for (String file : filesToLoad) {
+ entries.get(i % entries.size()).getValue().add(file);
+ i++;
+ }
+
+ // Use the threadpool to assign files one-at-a-time to the server
+ for (Entry<String,List<String>> entry : entries) {
+ if (entry.getValue().isEmpty()) {
+ continue;
+ }
+ final Entry<String,List<String>> finalEntry = entry;
+ results.add(threadPool.submit(new Callable<List<String>>() {
@Override
- public void run() {
+ public List<String> call() {
+ if (log.isDebugEnabled()) {
+ log.debug("Asking " + finalEntry.getKey() + " to load " + sampleList(finalEntry.getValue(), 10));
+ }
+ List<String> failures = new ArrayList<String>();
ClientService.Iface client = null;
try {
- client = ServerClient.getConnection(HdfsZooInstance.getInstance());
- List<String> fail = client.bulkImportFiles(null, SecurityConstants.getSystemCredentials(), tid, tableId, attempt, errorDir, setTime);
- attempt.removeAll(fail);
- filesToLoad.removeAll(attempt);
+ client = ThriftUtil.getTServerClient(finalEntry.getKey(), conf);
+ for (String file : finalEntry.getValue()) {
+ List<String> attempt = Collections.singletonList(file);
+ log.debug("Asking " + finalEntry.getKey() + " to bulk import " + file);
+ List<String> fail = client.bulkImportFiles(null, SecurityConstants.getSystemCredentials(), tid, tableId, attempt, errorDir, setTime);
+ if (fail.isEmpty()) {
+ filesToLoad.remove(file);
+ } else {
+ failures.addAll(fail);
+ }
+ }
} catch (Exception ex) {
log.error(ex, ex);
} finally {
ServerClient.close(client);
}
+ return failures;
}
- })));
+ }));
}
- for (Future<?> f : results)
- f.get();
+ Set<String> failures = new HashSet<String>();
+ for (Future<List<String>> f : results)
+ failures.addAll(f.get());
if (filesToLoad.size() > 0) {
- log.debug("tid " + tid + " attempt " + (i + 1) + " " + filesToLoad + " failed");
+ log.debug("tid " + tid + " attempt " + (i + 1) + " " + sampleList(filesToLoad, 10) + " failed");
UtilWaitThread.sleep(100);
}
}
@@ -449,16 +480,24 @@ class LoadFiles extends MasterRepo {
return new CompleteBulkImport(tableId, source, bulk, errorDir);
}
- private List<List<String>> groupFiles(List<String> files, int groups) {
- List<List<String>> result = new ArrayList<List<String>>();
- Iterator<String> iter = files.iterator();
- for (int i = 0; i < groups && iter.hasNext(); i++) {
- List<String> group = new ArrayList<String>();
- for (int j = 0; j < Math.ceil(files.size() / (double) groups) && iter.hasNext(); j++) {
- group.add(iter.next());
+ static String sampleList(Collection<?> potentiallyLongList, int max) {
+ StringBuffer result = new StringBuffer();
+ result.append("[");
+ int i = 0;
+ for (Object obj : potentiallyLongList) {
+ result.append(obj);
+ if (i >= max) {
+ result.append("...");
+ break;
+ } else {
+ result.append(", ");
}
- result.add(group);
+ i++;
}
- return result;
+ if (i < max)
+ result.delete(result.length() - 2, result.length());
+ result.append("]");
+ return result.toString();
}
+
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-412_be2fdba7.diff |
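One detail of the ACCUMULO-412 patch worth isolating: BulkImporter stops advancing with Range.followingPrefix and instead appends a single zero byte to the tablet end row. A small sketch of the difference follows; it is illustrative only, and the row value is made up.

```java
import org.apache.accumulo.core.data.Range;
import org.apache.hadoop.io.Text;

public class FollowingRowExample {
  public static void main(String[] args) {
    Text endRow = new Text("row1");

    // What the old code used: the next possible *prefix*, which jumps past
    // every row that merely starts with "row1" (e.g. "row1a"), so tablets
    // holding such rows never saw the file assigned to them.
    Text followingPrefix = Range.followingPrefix(endRow);

    // What the patched BulkImporter uses: the immediately following *row*,
    // obtained by appending a single 0x00 byte, so no rows can be skipped
    // while walking tablet end rows.
    Text followingRow = new Text(endRow);
    followingRow.append(new byte[] {0}, 0, 1);

    System.out.println(followingPrefix + " vs " + followingRow + " (length " + followingRow.getLength() + ")");
  }
}
```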
bugs-dot-jar_data_ACCUMULO-189_6dbbdc21 | ---
BugID: ACCUMULO-189
Summary: RegExFilter deepCopy NullPointerException
Description: 'If any of the regex matcher objects are null (i.e. for example, if you
only specify a regex for the column family), the deepCopy call will throw a NullPointerException.
'
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
index fcf77c4..0b3b73f 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
@@ -39,10 +39,10 @@ public class RegExFilter extends Filter {
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
RegExFilter result = new RegExFilter();
result.setSource(getSource().deepCopy(env));
- result.rowMatcher = rowMatcher.pattern().matcher("");
- result.colfMatcher = colfMatcher.pattern().matcher("");
- result.colqMatcher = colqMatcher.pattern().matcher("");
- result.valueMatcher = valueMatcher.pattern().matcher("");
+ result.rowMatcher = copyMatcher(rowMatcher);
+ result.colfMatcher = copyMatcher(colfMatcher);
+ result.colqMatcher = copyMatcher(colqMatcher);
+ result.valueMatcher = copyMatcher(valueMatcher);
result.orFields = orFields;
return result;
}
@@ -61,6 +61,14 @@ public class RegExFilter extends Filter {
private ByteArrayBackedCharSequence babcs = new ByteArrayBackedCharSequence();
+ private Matcher copyMatcher(Matcher m)
+ {
+ if(m == null)
+ return m;
+ else
+ return m.pattern().matcher("");
+ }
+
private boolean matches(Matcher matcher, ByteSequence bs) {
if (matcher != null) {
babcs.set(bs);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-189_6dbbdc21.diff |
bugs-dot-jar_data_ACCUMULO-189_cd7feb4d | ---
BugID: ACCUMULO-189
Summary: RegExFilter deepCopy NullPointerException
Description: 'If any of the regex matcher objects are null (i.e. for example, if you
only specify a regex for the column family), the deepCopy call will throw a NullPointerException.
'
diff --git a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
index fcf77c4..0b3b73f 100644
--- a/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
+++ b/src/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
@@ -39,10 +39,10 @@ public class RegExFilter extends Filter {
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
RegExFilter result = new RegExFilter();
result.setSource(getSource().deepCopy(env));
- result.rowMatcher = rowMatcher.pattern().matcher("");
- result.colfMatcher = colfMatcher.pattern().matcher("");
- result.colqMatcher = colqMatcher.pattern().matcher("");
- result.valueMatcher = valueMatcher.pattern().matcher("");
+ result.rowMatcher = copyMatcher(rowMatcher);
+ result.colfMatcher = copyMatcher(colfMatcher);
+ result.colqMatcher = copyMatcher(colqMatcher);
+ result.valueMatcher = copyMatcher(valueMatcher);
result.orFields = orFields;
return result;
}
@@ -61,6 +61,14 @@ public class RegExFilter extends Filter {
private ByteArrayBackedCharSequence babcs = new ByteArrayBackedCharSequence();
+ private Matcher copyMatcher(Matcher m)
+ {
+ if(m == null)
+ return m;
+ else
+ return m.pattern().matcher("");
+ }
+
private boolean matches(Matcher matcher, ByteSequence bs) {
if (matcher != null) {
babcs.set(bs);
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-189_cd7feb4d.diff |
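A short, hypothetical client-side sketch of the scenario described in the two ACCUMULO-189 records above — configuring RegExFilter with only a column-family pattern — using the standard setRegexs helper. The regex, priority, and iterator name are arbitrary, and the deepCopy that used to fail happens server-side, so it is only described in the comment.

```java
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.iterators.user.RegExFilter;

public class ColfOnlyRegexExample {
  public static void main(String[] args) {
    IteratorSetting is = new IteratorSetting(50, "colfRegex", RegExFilter.class);

    // Only the column-family pattern is supplied; row, qualifier, and value
    // patterns are left null. Before the patch, a server-side deepCopy() of
    // this iterator dereferenced the null matchers and threw a
    // NullPointerException; copyMatcher() now passes nulls through.
    RegExFilter.setRegexs(is, null, "meta.*", null, null, false);

    System.out.println(is.getOptions());
  }
}
```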
bugs-dot-jar_data_ACCUMULO-2857_9fcca2ed | ---
BugID: ACCUMULO-2857
Summary: MockTableOperations.tableIdMap always returns tableName as ID
Description: |-
Noticed and fixed this during ACCUMULO-378.
An exception was thrown unexpectedly when trying to use tableIdMap with a MockInstance. Lift fix from 93c8bddc71d1ee190649eeab263205185d75421c into main tree.
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java
index 5977d1d..272d1af 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java
@@ -21,6 +21,7 @@ import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
+import java.util.concurrent.atomic.AtomicInteger;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.BatchScanner;
@@ -38,6 +39,7 @@ public class MockAccumulo {
final Map<String,String> systemProperties = new HashMap<String,String>();
Map<String,MockUser> users = new HashMap<String,MockUser>();
final FileSystem fs;
+ final AtomicInteger tableIdCounter = new AtomicInteger(0);
MockAccumulo(FileSystem fs) {
this.fs = fs;
@@ -76,7 +78,7 @@ public class MockAccumulo {
}
public void createTable(String username, String tableName, boolean useVersions, TimeType timeType) {
- MockTable t = new MockTable(useVersions, timeType);
+ MockTable t = new MockTable(useVersions, timeType, Integer.toString(tableIdCounter.incrementAndGet()));
t.userPermissions.put(username, EnumSet.allOf(TablePermission.class));
tables.put(tableName, t);
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
index 3dcab11..2e13d84 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTable.java
@@ -89,9 +89,11 @@ public class MockTable {
private TimeType timeType;
SortedSet<Text> splits = new ConcurrentSkipListSet<Text>();
Map<String,Set<Text>> localityGroups = new TreeMap<String, Set<Text>>();
+ private String tableId;
- MockTable(boolean limitVersion, TimeType timeType) {
+ MockTable(boolean limitVersion, TimeType timeType, String tableId) {
this.timeType = timeType;
+ this.tableId = tableId;
settings = IteratorUtil.generateInitialTableProperties(limitVersion);
for (Entry<String,String> entry : AccumuloConfiguration.getDefaultConfiguration()) {
String key = entry.getKey();
@@ -143,4 +145,8 @@ public class MockTable {
if (reAdd)
splits.add(start);
}
+
+ public String getTableId() {
+ return this.tableId;
+ }
}
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
index 64f8225..5b15351 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
@@ -296,8 +296,8 @@ public class MockTableOperations extends TableOperationsHelper {
@Override
public Map<String,String> tableIdMap() {
Map<String,String> result = new HashMap<String,String>();
- for (String table : acu.tables.keySet()) {
- result.put(table, table);
+ for (Entry<String,MockTable> entry : acu.tables.entrySet()) {
+ result.put(entry.getKey(), entry.getValue().getTableId());
}
return result;
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2857_9fcca2ed.diff |
bugs-dot-jar_data_ACCUMULO-2899_31aea2ad | ---
BugID: ACCUMULO-2899
Summary: WAL handling fails to deal with 1.4 -> 1.5 -> 1.6
Description: |-
After doing a 1.4 -> 1.5 -> 1.6 upgrade that still has WALs for some tables, the 1.6 instance fails to correctly handle the 1.4 recovered WALs.
This can happen either through not waiting long enough after the upgrade to 1.5 or because of an offline table brought online on 1.6 (ala ACCUMULO-2816).
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index 5c1194a..d4a2d4f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -529,8 +529,15 @@ public class VolumeManagerImpl implements VolumeManager {
@Override
public Path getFullPath(FileType fileType, String path) {
- if (path.contains(":"))
- return new Path(path);
+ int colon = path.indexOf(':');
+ if (colon > -1) {
+ // Check if this is really an absolute path or if this is a 1.4 style relative path for a WAL
+ if (fileType == FileType.WAL && path.charAt(colon + 1) != '/') {
+ path = path.substring(path.indexOf('/'));
+ } else {
+ return new Path(path);
+ }
+ }
// normalize the path
Path fullPath = new Path(defaultVolume.getBasePath(), fileType.getDirectory());
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/RecoveryPath.java b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/RecoveryPath.java
index 1da945d..4a6638a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/RecoveryPath.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/RecoveryPath.java
@@ -34,8 +34,11 @@ public class RecoveryPath {
String uuid = walPath.getName();
// drop uuid
walPath = walPath.getParent();
- // drop server
- walPath = walPath.getParent();
+ // recovered 1.4 WALs won't have a server component
+ if (!walPath.getName().equals(FileType.WAL.getDirectory())) {
+ // drop server
+ walPath = walPath.getParent();
+ }
if (!walPath.getName().equals(FileType.WAL.getDirectory()))
throw new IllegalArgumentException("Bad path " + walPath);
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
index ae850af..56a0fd5 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
@@ -281,7 +281,9 @@ public class GarbageCollectWriteAheadLogs {
while (iterator.hasNext()) {
for (String entry : iterator.next().logSet) {
- String uuid = new Path(entry).getName();
+ // old style WALs will have the IP:Port of their logger and new style will either be a Path either absolute or relative, in all cases
+ // the last "/" will mark a UUID file name.
+ String uuid = entry.substring(entry.lastIndexOf("/") + 1);
if (!isUUID(uuid)) {
// fully expect this to be a uuid, if its not then something is wrong and walog GC should not proceed!
throw new IllegalArgumentException("Expected uuid, but got " + uuid + " from " + entry);
@@ -327,8 +329,8 @@ public class GarbageCollectWriteAheadLogs {
continue;
for (FileStatus status : listing) {
String server = status.getPath().getName();
- servers.add(server);
if (status.isDir()) {
+ servers.add(server);
for (FileStatus file : fs.listStatus(new Path(walRoot, server))) {
if (isUUID(file.getPath().getName())) {
fileToServerMap.put(file.getPath(), server);
@@ -339,7 +341,9 @@ public class GarbageCollectWriteAheadLogs {
}
} else if (isUUID(server)) {
// old-style WAL are not under a directory
+ servers.add("");
fileToServerMap.put(status.getPath(), "");
+ nameToFileMap.put(server, status.getPath());
} else {
log.info("Ignoring file " + status.getPath() + " because it doesn't look like a uuid");
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java
index f73d4ca..36b2289 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/Tablet.java
@@ -1357,6 +1357,8 @@ public class Tablet {
tabletResources.setTablet(this, acuTableConf);
if (!logEntries.isEmpty()) {
log.info("Starting Write-Ahead Log recovery for " + this.extent);
+ // count[0] = entries used on tablet
+ // count[1] = track max time from walog entries wihtout timestamps
final long[] count = new long[2];
final CommitSession commitSession = tabletMemory.getCommitSession();
count[1] = Long.MIN_VALUE;
@@ -1388,6 +1390,7 @@ public class Tablet {
commitSession.updateMaxCommittedTime(tabletTime.getTime());
if (count[0] == 0) {
+ log.debug("No replayed mutations applied, removing unused entries for " + extent);
MetadataTableUtil.removeUnusedWALEntries(extent, logEntries, tabletServer.getLock());
logEntries.clear();
}
@@ -1403,7 +1406,7 @@ public class Tablet {
currentLogs = new HashSet<DfsLogger>();
for (LogEntry logEntry : logEntries) {
for (String log : logEntry.logSet) {
- currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), log));
+ currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), log, logEntry.getColumnQualifier().toString()));
}
}
@@ -3661,12 +3664,12 @@ public class Tablet {
for (DfsLogger logger : otherLogs) {
otherLogsCopy.add(logger.toString());
- doomed.add(logger.toString());
+ doomed.add(logger.getMeta());
}
for (DfsLogger logger : currentLogs) {
currentLogsCopy.add(logger.toString());
- doomed.remove(logger.toString());
+ doomed.remove(logger.getMeta());
}
otherLogs = Collections.emptySet();
@@ -3684,6 +3687,10 @@ public class Tablet {
log.debug("Logs for current memory: " + getExtent() + " " + logger);
}
+ for (String logger : doomed) {
+ log.debug("Logs to be destroyed: " + getExtent() + " " + logger);
+ }
+
return doomed;
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
index cca2953..b152380 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
@@ -220,13 +220,21 @@ public class DfsLogger {
private String logPath;
private Daemon syncThread;
+ /* Track what's actually in +r/!0 for this logger ref */
+ private String metaReference;
+
public DfsLogger(ServerResources conf) throws IOException {
this.conf = conf;
}
- public DfsLogger(ServerResources conf, String filename) throws IOException {
+ /**
+ * Refernce a pre-existing log file.
+ * @param meta the cq for the "log" entry in +r/!0
+ */
+ public DfsLogger(ServerResources conf, String filename, String meta) throws IOException {
this.conf = conf;
this.logPath = filename;
+ metaReference = meta;
}
public static DFSLoggerInputStreams readHeaderAndReturnStream(VolumeManager fs, Path path, AccumuloConfiguration conf) throws IOException {
@@ -315,6 +323,7 @@ public class DfsLogger {
VolumeManager fs = conf.getFileSystem();
logPath = fs.choose(ServerConstants.getWalDirs()) + "/" + logger + "/" + filename;
+ metaReference = toString();
try {
short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
if (replication == 0)
@@ -400,6 +409,16 @@ public class DfsLogger {
return fileName;
}
+ /**
+ * get the cq needed to reference this logger's entry in +r/!0
+ */
+ public String getMeta() {
+ if (null == metaReference) {
+ throw new IllegalStateException("logger doesn't have meta reference. " + this);
+ }
+ return metaReference;
+ }
+
public String getFileName() {
return logPath.toString();
}
| bugs-dot-jar/accumulo_extracted_diff/developer-patch_bugs-dot-jar_ACCUMULO-2899_31aea2ad.diff |
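The core decision in the ACCUMULO-2899 VolumeManagerImpl change above can be isolated into a standalone sketch: distinguish a real absolute URI from a 1.4-style "host:port/uuid" WAL entry by looking at the character after the first colon. The hostnames and UUIDs below are made up; this is an illustration, not the project's code.

```java
public class WalPathStyleSketch {
  // Distilled from the VolumeManagerImpl hunk above: decide whether a WAL
  // reference is an absolute URI or a 1.4-style "host:port/..." entry.
  static String normalize(String path) {
    int colon = path.indexOf(':');
    if (colon > -1) {
      if (path.charAt(colon + 1) != '/') {
        // 1.4-style logger entry such as "10.0.0.1:9997/uuid" -- keep only the
        // part from the first '/', to be resolved against the default WAL dir.
        return path.substring(path.indexOf('/'));
      }
      return path; // already an absolute URI like hdfs://nn:8020/accumulo/wal/...
    }
    return path;   // plain relative path, handled by the normal code path
  }

  public static void main(String[] args) {
    System.out.println(normalize("10.0.0.1:9997/4c2f3a7e-1111-2222-3333-444444444444"));
    System.out.println(normalize("hdfs://namenode:8020/accumulo/wal/host+9997/4c2f3a7e-1111-2222-3333-444444444444"));
  }
}
```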