Dataset columns (all string-valued; min/max lengths as reported):

Column           Type    Min length  Max length
target           string  20          113k
src_fm           string  11          86.3k
src_fm_fc        string  21          86.4k
src_fm_fc_co     string  30          86.4k
src_fm_fc_ms     string  42          86.8k
src_fm_fc_ms_ff  string  43          86.8k
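Each group of six rows below is one dataset example: the target column holds a JUnit test, and the five src_* columns hold the focal method under test with progressively more enclosing-class context (method alone; wrapped in its class; plus constructor signatures; plus other method signatures; plus field declarations). That nesting can be read directly off the rows below; the record here is an illustrative sketch of the row shape, not a type from the dataset:

// Illustrative row shape for this dump; field names mirror the columns above.
public record TestToCodeRow(
        String target,        // the JUnit test method
        String srcFm,         // focal method body alone
        String srcFmFc,       // + enclosing focal class declaration
        String srcFmFcCo,     // + constructor signatures
        String srcFmFcMs,     // + other method signatures
        String srcFmFcMsFf) { // + field declarations
}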
@Test public void canGetBitmap() { PrintPreview printPreview = setupValidPrintPreview(true); ShadowBitmap bitmap = Shadows.shadowOf(printPreview.getBitmap()); assertThat(bitmap).isNotNull(); assertThat(bitmap.getDescription()).isEqualTo("Bitmap (576 x 48)\n" + "Bitmap (9 x 32)"); assertThat(printPreview.availableWidth).isEqualTo(576); }
public Bitmap getBitmap() { return bitmap; }
PrintPreview { public Bitmap getBitmap() { return bitmap; } }
PrintPreview { public Bitmap getBitmap() { return bitmap; } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); }
PrintPreview { public Bitmap getBitmap() { return bitmap; } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); Bitmap getBitmap(); Bitmap getScaledBitmap(Context context); byte[] getCompressedBitmap(); int determineHeight(); }
PrintPreview { public Bitmap getBitmap() { return bitmap; } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); Bitmap getBitmap(); Bitmap getScaledBitmap(Context context); byte[] getCompressedBitmap(); int determineHeight(); }
@Test public void canGetScaledBitmap() { PrintPreview printPreview = setupValidPrintPreview(true); ShadowBitmap bitmap = Shadows.shadowOf(printPreview.getScaledBitmap(RuntimeEnvironment.application)); assertThat(bitmap).isNotNull(); assertThat(bitmap.getDescription()).isEqualTo("Bitmap (576 x 48)\n" + "Bitmap (9 x 32) scaled to 708 x 59 with filter true"); }
public Bitmap getScaledBitmap(Context context) { cursor = VERTICAL_MARGIN; DisplayMetrics metrics = context.getResources().getDisplayMetrics(); Bitmap bitmap = getBitmap(); float pxPerMm = metrics.xdpi / 25.4f; int width = bitmap.getWidth(); float scale = (printerSettings.getPrintableWidth() * pxPerMm) / (float) width; return Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scale), (int) (bitmap.getHeight() * scale), true); }
PrintPreview { public Bitmap getScaledBitmap(Context context) { cursor = VERTICAL_MARGIN; DisplayMetrics metrics = context.getResources().getDisplayMetrics(); Bitmap bitmap = getBitmap(); float pxPerMm = metrics.xdpi / 25.4f; int width = bitmap.getWidth(); float scale = (printerSettings.getPrintableWidth() * pxPerMm) / (float) width; return Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scale), (int) (bitmap.getHeight() * scale), true); } }
PrintPreview { public Bitmap getScaledBitmap(Context context) { cursor = VERTICAL_MARGIN; DisplayMetrics metrics = context.getResources().getDisplayMetrics(); Bitmap bitmap = getBitmap(); float pxPerMm = metrics.xdpi / 25.4f; int width = bitmap.getWidth(); float scale = (printerSettings.getPrintableWidth() * pxPerMm) / (float) width; return Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scale), (int) (bitmap.getHeight() * scale), true); } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); }
PrintPreview { public Bitmap getScaledBitmap(Context context) { cursor = VERTICAL_MARGIN; DisplayMetrics metrics = context.getResources().getDisplayMetrics(); Bitmap bitmap = getBitmap(); float pxPerMm = metrics.xdpi / 25.4f; int width = bitmap.getWidth(); float scale = (printerSettings.getPrintableWidth() * pxPerMm) / (float) width; return Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scale), (int) (bitmap.getHeight() * scale), true); } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); Bitmap getBitmap(); Bitmap getScaledBitmap(Context context); byte[] getCompressedBitmap(); int determineHeight(); }
PrintPreview { public Bitmap getScaledBitmap(Context context) { cursor = VERTICAL_MARGIN; DisplayMetrics metrics = context.getResources().getDisplayMetrics(); Bitmap bitmap = getBitmap(); float pxPerMm = metrics.xdpi / 25.4f; int width = bitmap.getWidth(); float scale = (printerSettings.getPrintableWidth() * pxPerMm) / (float) width; return Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scale), (int) (bitmap.getHeight() * scale), true); } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); Bitmap getBitmap(); Bitmap getScaledBitmap(Context context); byte[] getCompressedBitmap(); int determineHeight(); }
@Test public void handlesNoFonts() { PrintPreview printPreview = setupValidPrintPreview(false); Bitmap bitmap = printPreview.getScaledBitmap(RuntimeEnvironment.application); assertThat(bitmap).isNotNull(); }
public Bitmap getScaledBitmap(Context context) { cursor = VERTICAL_MARGIN; DisplayMetrics metrics = context.getResources().getDisplayMetrics(); Bitmap bitmap = getBitmap(); float pxPerMm = metrics.xdpi / 25.4f; int width = bitmap.getWidth(); float scale = (printerSettings.getPrintableWidth() * pxPerMm) / (float) width; return Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scale), (int) (bitmap.getHeight() * scale), true); }
PrintPreview { public Bitmap getScaledBitmap(Context context) { cursor = VERTICAL_MARGIN; DisplayMetrics metrics = context.getResources().getDisplayMetrics(); Bitmap bitmap = getBitmap(); float pxPerMm = metrics.xdpi / 25.4f; int width = bitmap.getWidth(); float scale = (printerSettings.getPrintableWidth() * pxPerMm) / (float) width; return Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scale), (int) (bitmap.getHeight() * scale), true); } }
PrintPreview { public Bitmap getScaledBitmap(Context context) { cursor = VERTICAL_MARGIN; DisplayMetrics metrics = context.getResources().getDisplayMetrics(); Bitmap bitmap = getBitmap(); float pxPerMm = metrics.xdpi / 25.4f; int width = bitmap.getWidth(); float scale = (printerSettings.getPrintableWidth() * pxPerMm) / (float) width; return Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scale), (int) (bitmap.getHeight() * scale), true); } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); }
PrintPreview { public Bitmap getScaledBitmap(Context context) { cursor = VERTICAL_MARGIN; DisplayMetrics metrics = context.getResources().getDisplayMetrics(); Bitmap bitmap = getBitmap(); float pxPerMm = metrics.xdpi / 25.4f; int width = bitmap.getWidth(); float scale = (printerSettings.getPrintableWidth() * pxPerMm) / (float) width; return Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scale), (int) (bitmap.getHeight() * scale), true); } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); Bitmap getBitmap(); Bitmap getScaledBitmap(Context context); byte[] getCompressedBitmap(); int determineHeight(); }
PrintPreview { public Bitmap getScaledBitmap(Context context) { cursor = VERTICAL_MARGIN; DisplayMetrics metrics = context.getResources().getDisplayMetrics(); Bitmap bitmap = getBitmap(); float pxPerMm = metrics.xdpi / 25.4f; int width = bitmap.getWidth(); float scale = (printerSettings.getPrintableWidth() * pxPerMm) / (float) width; return Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scale), (int) (bitmap.getHeight() * scale), true); } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); Bitmap getBitmap(); Bitmap getScaledBitmap(Context context); byte[] getCompressedBitmap(); int determineHeight(); }
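getScaledBitmap converts the printer's printable width from millimetres to screen pixels (xdpi / 25.4 gives pixels per millimetre), then scales the bitmap uniformly so its width fills that printable width. A minimal standalone sketch of the same arithmetic: the 576 x 48 bitmap and the 708 x 59 result come from the canGetScaledBitmap test above, while the xdpi and printable-width values are illustrative assumptions chosen to reproduce it.

// Standalone sketch of the mm-to-px scaling performed by getScaledBitmap.
// xdpi and printableWidthMm are illustrative values, not taken from the source.
public class ScaleSketch {
    public static void main(String[] args) {
        float xdpi = 160f;               // assumed display density (px per inch)
        float printableWidthMm = 112.4f; // assumed printable width in millimetres
        int bitmapWidth = 576;           // from the canGetScaledBitmap test
        int bitmapHeight = 48;

        float pxPerMm = xdpi / 25.4f;    // 25.4 mm per inch
        float scale = (printableWidthMm * pxPerMm) / bitmapWidth;

        // Prints "708 x 59", matching the expected scaled size in the test.
        System.out.printf("%d x %d%n", (int) (bitmapWidth * scale), (int) (bitmapHeight * scale));
    }
}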
@Test public void willSplitLongLineCorrectly() { PrintPayload payload = new PrintPayload(); TextRow row = payload.append("this is a very long line of text that should be split across multiple lines"); PrintPreview printPreview = new PrintPreview(payload, getPrinterSettings()); List<TextRow> rows = printPreview.splitLongTextRow(row); assertThat(rows).hasSize(2); assertThat(rows.get(0).getText()).isEqualTo("this is a very long line of text that should be "); assertThat(rows.get(1).getText()).isEqualTo("split across multiple lines"); assertThat(rows.get(0).getText().length()).isLessThanOrEqualTo(FONT_A.getNumColumns()); assertThat(rows.get(1).getText().length()).isLessThanOrEqualTo(FONT_A.getNumColumns()); }
protected List<TextRow> splitLongTextRow(TextRow textRow) { List<TextRow> result = new ArrayList<>(); PrinterFont font = getFont(textRow.getPrinterFontId()); String line = textRow.getText(); String remaining = ""; while (line.length() > 0) { if (line.length() > font.getNumColumns()) { remaining = line.charAt(line.length() - 1) + remaining; line = line.substring(0, line.length() - 1); } else { TextRow newRow = new TextRow(line) .align(textRow.getAlignmentStyle()) .fontStyle(textRow.getFontStyle()) .underline(textRow.getUnderlineStyle()) .setFont(font); result.add(newRow); line = remaining; remaining = ""; } } return result; }
PrintPreview { protected List<TextRow> splitLongTextRow(TextRow textRow) { List<TextRow> result = new ArrayList<>(); PrinterFont font = getFont(textRow.getPrinterFontId()); String line = textRow.getText(); String remaining = ""; while (line.length() > 0) { if (line.length() > font.getNumColumns()) { remaining = line.charAt(line.length() - 1) + remaining; line = line.substring(0, line.length() - 1); } else { TextRow newRow = new TextRow(line) .align(textRow.getAlignmentStyle()) .fontStyle(textRow.getFontStyle()) .underline(textRow.getUnderlineStyle()) .setFont(font); result.add(newRow); line = remaining; remaining = ""; } } return result; } }
PrintPreview { protected List<TextRow> splitLongTextRow(TextRow textRow) { List<TextRow> result = new ArrayList<>(); PrinterFont font = getFont(textRow.getPrinterFontId()); String line = textRow.getText(); String remaining = ""; while (line.length() > 0) { if (line.length() > font.getNumColumns()) { remaining = line.charAt(line.length() - 1) + remaining; line = line.substring(0, line.length() - 1); } else { TextRow newRow = new TextRow(line) .align(textRow.getAlignmentStyle()) .fontStyle(textRow.getFontStyle()) .underline(textRow.getUnderlineStyle()) .setFont(font); result.add(newRow); line = remaining; remaining = ""; } } return result; } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); }
PrintPreview { protected List<TextRow> splitLongTextRow(TextRow textRow) { List<TextRow> result = new ArrayList<>(); PrinterFont font = getFont(textRow.getPrinterFontId()); String line = textRow.getText(); String remaining = ""; while (line.length() > 0) { if (line.length() > font.getNumColumns()) { remaining = line.charAt(line.length() - 1) + remaining; line = line.substring(0, line.length() - 1); } else { TextRow newRow = new TextRow(line) .align(textRow.getAlignmentStyle()) .fontStyle(textRow.getFontStyle()) .underline(textRow.getUnderlineStyle()) .setFont(font); result.add(newRow); line = remaining; remaining = ""; } } return result; } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); Bitmap getBitmap(); Bitmap getScaledBitmap(Context context); byte[] getCompressedBitmap(); int determineHeight(); }
PrintPreview { protected List<TextRow> splitLongTextRow(TextRow textRow) { List<TextRow> result = new ArrayList<>(); PrinterFont font = getFont(textRow.getPrinterFontId()); String line = textRow.getText(); String remaining = ""; while (line.length() > 0) { if (line.length() > font.getNumColumns()) { remaining = line.charAt(line.length() - 1) + remaining; line = line.substring(0, line.length() - 1); } else { TextRow newRow = new TextRow(line) .align(textRow.getAlignmentStyle()) .fontStyle(textRow.getFontStyle()) .underline(textRow.getUnderlineStyle()) .setFont(font); result.add(newRow); line = remaining; remaining = ""; } } return result; } PrintPreview(PrintPayload printPayload, PrinterSettings printerSettings); Bitmap getBitmap(); Bitmap getScaledBitmap(Context context); byte[] getCompressedBitmap(); int determineHeight(); }
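splitLongTextRow wraps purely by character count: it peels characters off the end of the line into a carry-over string until the line fits the font's column width, so a chunk can legitimately end in a trailing space, exactly as willSplitLongLineCorrectly expects. A standalone sketch of the same loop; the column width of 48 is inferred from the expected first chunk in the test (48 characters including the trailing space).

import java.util.ArrayList;
import java.util.List;

// Standalone sketch of the character-based wrapping in splitLongTextRow.
public class SplitSketch {
    static List<String> split(String line, int numColumns) {
        List<String> result = new ArrayList<>();
        String remaining = "";
        while (line.length() > 0) {
            if (line.length() > numColumns) {
                // Carry the last character over to the next row.
                remaining = line.charAt(line.length() - 1) + remaining;
                line = line.substring(0, line.length() - 1);
            } else {
                result.add(line);
                line = remaining;
                remaining = "";
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // Prints the two chunks asserted in willSplitLongLineCorrectly,
        // the first ending in a trailing space.
        for (String chunk : split("this is a very long line of text that should be split across multiple lines", 48)) {
            System.out.println("[" + chunk + "]");
        }
    }
}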
@Test public void willSetPrinterId() { PrintPayload payload = new PrintPayload("17676767"); assertThat(payload.getPrinterId()).isEqualTo("17676767"); }
public String getPrinterId() { return printerId; }
PrintPayload implements Jsonable { public String getPrinterId() { return printerId; } }
PrintPayload implements Jsonable { public String getPrinterId() { return printerId; } PrintPayload(); PrintPayload(String printerId); }
PrintPayload implements Jsonable { public String getPrinterId() { return printerId; } PrintPayload(); PrintPayload(String printerId); TextRow append(String text); TextRow append(String text, PrinterFont printerFont); void append(PrintPayload toAppendPayload); void appendEmptyLine(); TextRow appendLineOfChar(int columns, String character); TextRow appendLeftRight(int columns, String left, String right); ImageRow append(Bitmap image); ImageRow append(Bitmap image, boolean scaleToFit); PrintRow[] getRows(); int getCodePage(); void setCodePage(int codePage); String getLanguage(); void setLanguage(String languageCode); void setPrinterId(String printerId); String getPrinterId(); boolean hasPrinterId(); @Override String toJson(); static PrintPayload fromJson(String json); }
PrintPayload implements Jsonable { public String getPrinterId() { return printerId; } PrintPayload(); PrintPayload(String printerId); TextRow append(String text); TextRow append(String text, PrinterFont printerFont); void append(PrintPayload toAppendPayload); void appendEmptyLine(); TextRow appendLineOfChar(int columns, String character); TextRow appendLeftRight(int columns, String left, String right); ImageRow append(Bitmap image); ImageRow append(Bitmap image, boolean scaleToFit); PrintRow[] getRows(); int getCodePage(); void setCodePage(int codePage); String getLanguage(); void setLanguage(String languageCode); void setPrinterId(String printerId); String getPrinterId(); boolean hasPrinterId(); @Override String toJson(); static PrintPayload fromJson(String json); }
@Test public void checkAlignLeftRight() { PrintPayload payload = new PrintPayload(); TextRow row = payload.appendLeftRight(30, "Left", "Right"); assertThat(row.getText()).isEqualTo("Left                     Right"); }
public TextRow appendLeftRight(int columns, String left, String right) { String space = getSpaces(columns - left.length() - right.length()); return append(left + space + right); }
PrintPayload implements Jsonable { public TextRow appendLeftRight(int columns, String left, String right) { String space = getSpaces(columns - left.length() - right.length()); return append(left + space + right); } }
PrintPayload implements Jsonable { public TextRow appendLeftRight(int columns, String left, String right) { String space = getSpaces(columns - left.length() - right.length()); return append(left + space + right); } PrintPayload(); PrintPayload(String printerId); }
PrintPayload implements Jsonable { public TextRow appendLeftRight(int columns, String left, String right) { String space = getSpaces(columns - left.length() - right.length()); return append(left + space + right); } PrintPayload(); PrintPayload(String printerId); TextRow append(String text); TextRow append(String text, PrinterFont printerFont); void append(PrintPayload toAppendPayload); void appendEmptyLine(); TextRow appendLineOfChar(int columns, String character); TextRow appendLeftRight(int columns, String left, String right); ImageRow append(Bitmap image); ImageRow append(Bitmap image, boolean scaleToFit); PrintRow[] getRows(); int getCodePage(); void setCodePage(int codePage); String getLanguage(); void setLanguage(String languageCode); void setPrinterId(String printerId); String getPrinterId(); boolean hasPrinterId(); @Override String toJson(); static PrintPayload fromJson(String json); }
PrintPayload implements Jsonable { public TextRow appendLeftRight(int columns, String left, String right) { String space = getSpaces(columns - left.length() - right.length()); return append(left + space + right); } PrintPayload(); PrintPayload(String printerId); TextRow append(String text); TextRow append(String text, PrinterFont printerFont); void append(PrintPayload toAppendPayload); void appendEmptyLine(); TextRow appendLineOfChar(int columns, String character); TextRow appendLeftRight(int columns, String left, String right); ImageRow append(Bitmap image); ImageRow append(Bitmap image, boolean scaleToFit); PrintRow[] getRows(); int getCodePage(); void setCodePage(int codePage); String getLanguage(); void setLanguage(String languageCode); void setPrinterId(String printerId); String getPrinterId(); boolean hasPrinterId(); @Override String toJson(); static PrintPayload fromJson(String json); }
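appendLeftRight pads the gap between the two strings with exactly columns - left.length() - right.length() spaces, which is why the 30-column test above expects 21 spaces between "Left" and "Right" (30 - 4 - 5 = 21). A standalone sketch of that padding arithmetic:

// Standalone sketch of the padding arithmetic in appendLeftRight.
public class LeftRightSketch {
    static String leftRight(int columns, String left, String right) {
        int pad = columns - left.length() - right.length(); // 30 - 4 - 5 = 21 for the test
        StringBuilder sb = new StringBuilder(left);
        for (int i = 0; i < pad; i++) {
            sb.append(' ');
        }
        return sb.append(right).toString();
    }

    public static void main(String[] args) {
        String row = leftRight(30, "Left", "Right");
        System.out.println("[" + row + "] length=" + row.length()); // length=30
    }
}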
@Test public void checkAppendLineOfChar() { PrintPayload payload = new PrintPayload(); TextRow row = payload.appendLineOfChar(30, "="); assertThat(row.getText()).isEqualTo("=============================="); }
public TextRow appendLineOfChar(int columns, String character) { String line = character; while (line.length() < columns) { line += character; } return append(line); }
PrintPayload implements Jsonable { public TextRow appendLineOfChar(int columns, String character) { String line = character; while (line.length() < columns) { line += character; } return append(line); } }
PrintPayload implements Jsonable { public TextRow appendLineOfChar(int columns, String character) { String line = character; while (line.length() < columns) { line += character; } return append(line); } PrintPayload(); PrintPayload(String printerId); }
PrintPayload implements Jsonable { public TextRow appendLineOfChar(int columns, String character) { String line = character; while (line.length() < columns) { line += character; } return append(line); } PrintPayload(); PrintPayload(String printerId); TextRow append(String text); TextRow append(String text, PrinterFont printerFont); void append(PrintPayload toAppendPayload); void appendEmptyLine(); TextRow appendLineOfChar(int columns, String character); TextRow appendLeftRight(int columns, String left, String right); ImageRow append(Bitmap image); ImageRow append(Bitmap image, boolean scaleToFit); PrintRow[] getRows(); int getCodePage(); void setCodePage(int codePage); String getLanguage(); void setLanguage(String languageCode); void setPrinterId(String printerId); String getPrinterId(); boolean hasPrinterId(); @Override String toJson(); static PrintPayload fromJson(String json); }
PrintPayload implements Jsonable { public TextRow appendLineOfChar(int columns, String character) { String line = character; while (line.length() < columns) { line += character; } return append(line); } PrintPayload(); PrintPayload(String printerId); TextRow append(String text); TextRow append(String text, PrinterFont printerFont); void append(PrintPayload toAppendPayload); void appendEmptyLine(); TextRow appendLineOfChar(int columns, String character); TextRow appendLeftRight(int columns, String left, String right); ImageRow append(Bitmap image); ImageRow append(Bitmap image, boolean scaleToFit); PrintRow[] getRows(); int getCodePage(); void setCodePage(int codePage); String getLanguage(); void setLanguage(String languageCode); void setPrinterId(String printerId); String getPrinterId(); boolean hasPrinterId(); @Override String toJson(); static PrintPayload fromJson(String json); }
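appendLineOfChar grows the line one repetition at a time until it reaches the requested width; note that with a multi-character argument the result can overshoot columns, since the loop only checks the length before appending. A standalone sketch using StringBuilder instead of the original's repeated String concatenation (which is quadratic in columns):

// Standalone sketch of appendLineOfChar using StringBuilder.
public class LineOfCharSketch {
    static String lineOf(int columns, String character) {
        StringBuilder line = new StringBuilder(character);
        while (line.length() < columns) {
            line.append(character); // a multi-char argument can overshoot columns here
        }
        return line.toString();
    }

    public static void main(String[] args) {
        System.out.println(lineOf(30, "=")); // 30 '=' characters, as in checkAppendLineOfChar
    }
}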
@Test public void canSetImageScale() { PrintPayload payload = new PrintPayload(); Bitmap bmp = getBitmap(); ImageRow imageRow = payload.append(bmp, true); assertThat(imageRow.isScaleToFit()).isTrue(); }
public TextRow append(String text) { return append(text, null); }
PrintPayload implements Jsonable { public TextRow append(String text) { return append(text, null); } }
PrintPayload implements Jsonable { public TextRow append(String text) { return append(text, null); } PrintPayload(); PrintPayload(String printerId); }
PrintPayload implements Jsonable { public TextRow append(String text) { return append(text, null); } PrintPayload(); PrintPayload(String printerId); TextRow append(String text); TextRow append(String text, PrinterFont printerFont); void append(PrintPayload toAppendPayload); void appendEmptyLine(); TextRow appendLineOfChar(int columns, String character); TextRow appendLeftRight(int columns, String left, String right); ImageRow append(Bitmap image); ImageRow append(Bitmap image, boolean scaleToFit); PrintRow[] getRows(); int getCodePage(); void setCodePage(int codePage); String getLanguage(); void setLanguage(String languageCode); void setPrinterId(String printerId); String getPrinterId(); boolean hasPrinterId(); @Override String toJson(); static PrintPayload fromJson(String json); }
PrintPayload implements Jsonable { public TextRow append(String text) { return append(text, null); } PrintPayload(); PrintPayload(String printerId); TextRow append(String text); TextRow append(String text, PrinterFont printerFont); void append(PrintPayload toAppendPayload); void appendEmptyLine(); TextRow appendLineOfChar(int columns, String character); TextRow appendLeftRight(int columns, String left, String right); ImageRow append(Bitmap image); ImageRow append(Bitmap image, boolean scaleToFit); PrintRow[] getRows(); int getCodePage(); void setCodePage(int codePage); String getLanguage(); void setLanguage(String languageCode); void setPrinterId(String printerId); String getPrinterId(); boolean hasPrinterId(); @Override String toJson(); static PrintPayload fromJson(String json); }
@Test(expected = IllegalArgumentException.class) public void appendNullWillThrow() { PrintPayload payload = new PrintPayload(); payload.append((String) null); }
public TextRow append(String text) { return append(text, null); }
PrintPayload implements Jsonable { public TextRow append(String text) { return append(text, null); } }
PrintPayload implements Jsonable { public TextRow append(String text) { return append(text, null); } PrintPayload(); PrintPayload(String printerId); }
PrintPayload implements Jsonable { public TextRow append(String text) { return append(text, null); } PrintPayload(); PrintPayload(String printerId); TextRow append(String text); TextRow append(String text, PrinterFont printerFont); void append(PrintPayload toAppendPayload); void appendEmptyLine(); TextRow appendLineOfChar(int columns, String character); TextRow appendLeftRight(int columns, String left, String right); ImageRow append(Bitmap image); ImageRow append(Bitmap image, boolean scaleToFit); PrintRow[] getRows(); int getCodePage(); void setCodePage(int codePage); String getLanguage(); void setLanguage(String languageCode); void setPrinterId(String printerId); String getPrinterId(); boolean hasPrinterId(); @Override String toJson(); static PrintPayload fromJson(String json); }
PrintPayload implements Jsonable { public TextRow append(String text) { return append(text, null); } PrintPayload(); PrintPayload(String printerId); TextRow append(String text); TextRow append(String text, PrinterFont printerFont); void append(PrintPayload toAppendPayload); void appendEmptyLine(); TextRow appendLineOfChar(int columns, String character); TextRow appendLeftRight(int columns, String left, String right); ImageRow append(Bitmap image); ImageRow append(Bitmap image, boolean scaleToFit); PrintRow[] getRows(); int getCodePage(); void setCodePage(int codePage); String getLanguage(); void setLanguage(String languageCode); void setPrinterId(String printerId); String getPrinterId(); boolean hasPrinterId(); @Override String toJson(); static PrintPayload fromJson(String json); }
@Test public void willSendMessage() throws RemoteException { setupReplyTo(); String message = "hellooooooo"; boolean sent = messengerChannelServer.send(message); assertThat(sent).isTrue(); verifySentMessage(MESSAGE_RESPONSE, message); }
@Override public boolean send(MessageException error) { Bundle b = new Bundle(); b.putString(KEY_DATA_RESPONSE, error.toJson()); Message message = createMessage(b, MESSAGE_ERROR); return send(message); }
MessengerChannelServer extends BaseChannelServer { @Override public boolean send(MessageException error) { Bundle b = new Bundle(); b.putString(KEY_DATA_RESPONSE, error.toJson()); Message message = createMessage(b, MESSAGE_ERROR); return send(message); } }
MessengerChannelServer extends BaseChannelServer { @Override public boolean send(MessageException error) { Bundle b = new Bundle(); b.putString(KEY_DATA_RESPONSE, error.toJson()); Message message = createMessage(b, MESSAGE_ERROR); return send(message); } MessengerChannelServer(String serviceComponentName, String clientPackageName); }
MessengerChannelServer extends BaseChannelServer { @Override public boolean send(MessageException error) { Bundle b = new Bundle(); b.putString(KEY_DATA_RESPONSE, error.toJson()); Message message = createMessage(b, MESSAGE_ERROR); return send(message); } MessengerChannelServer(String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override void disposeClient(); @Override boolean send(MessageException error); @Override String getClientPackageName(); @Override boolean send(String senddata); @Override boolean sendEndStream(); }
MessengerChannelServer extends BaseChannelServer { @Override public boolean send(MessageException error) { Bundle b = new Bundle(); b.putString(KEY_DATA_RESPONSE, error.toJson()); Message message = createMessage(b, MESSAGE_ERROR); return send(message); } MessengerChannelServer(String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override void disposeClient(); @Override boolean send(MessageException error); @Override String getClientPackageName(); @Override boolean send(String senddata); @Override boolean sendEndStream(); }
@Test public void willSendMessageViaWebsocket() throws RemoteException { setupMockBoundMessengerService(); createObservableSendDataAndSubscribe("hellooooo"); sendConnectionParamsFromServer(); setWebSocketConnectedState(true); observableWebSocketClient.sendMessage("This is ground control to Major Tom").test(); verify(observableWebSocketClient.okWebSocketClient).sendMessage("This is ground control to Major Tom"); }
@Override public Observable<String> sendMessage(final String message) { if (!super.isConnected()) { return connectAndSendMessage(message); } else { if (responseEmitter == null || responseEmitter.hasComplete()) { responseEmitter = PublishSubject.create(); } if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.updateCallbackEmitter(responseEmitter); okWebSocketClient.sendMessage(message); } else { super.sendMessage(message); } return responseEmitter; } }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public Observable<String> sendMessage(final String message) { if (!super.isConnected()) { return connectAndSendMessage(message); } else { if (responseEmitter == null || responseEmitter.hasComplete()) { responseEmitter = PublishSubject.create(); } if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.updateCallbackEmitter(responseEmitter); okWebSocketClient.sendMessage(message); } else { super.sendMessage(message); } return responseEmitter; } } }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public Observable<String> sendMessage(final String message) { if (!super.isConnected()) { return connectAndSendMessage(message); } else { if (responseEmitter == null || responseEmitter.hasComplete()) { responseEmitter = PublishSubject.create(); } if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.updateCallbackEmitter(responseEmitter); okWebSocketClient.sendMessage(message); } else { super.sendMessage(message); } return responseEmitter; } } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public Observable<String> sendMessage(final String message) { if (!super.isConnected()) { return connectAndSendMessage(message); } else { if (responseEmitter == null || responseEmitter.hasComplete()) { responseEmitter = PublishSubject.create(); } if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.updateCallbackEmitter(responseEmitter); okWebSocketClient.sendMessage(message); } else { super.sendMessage(message); } return responseEmitter; } } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); @Override Completable connect(); @Override boolean isConnected(); @Override Observable<String> sendMessage(final String message); @Override void closeConnection(); }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public Observable<String> sendMessage(final String message) { if (!super.isConnected()) { return connectAndSendMessage(message); } else { if (responseEmitter == null || responseEmitter.hasComplete()) { responseEmitter = PublishSubject.create(); } if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.updateCallbackEmitter(responseEmitter); okWebSocketClient.sendMessage(message); } else { super.sendMessage(message); } return responseEmitter; } } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); @Override Completable connect(); @Override boolean isConnected(); @Override Observable<String> sendMessage(final String message); @Override void closeConnection(); }
@Test public void willFallbackToMessengerIfNotConnected() throws RemoteException { setupMockBoundMessengerService(); createObservableSendDataAndSubscribe("connect with me"); sendConnectionParamsFromServer(); setWebSocketConnectedState(false); observableWebSocketClient.sendMessage("Stop messaging me").test(); verify(observableWebSocketClient.okWebSocketClient, times(0)).sendMessage(anyString()); verifyMessagesSentToServerViaMessenger(2); }
@Override public Observable<String> sendMessage(final String message) { if (!super.isConnected()) { return connectAndSendMessage(message); } else { if (responseEmitter == null || responseEmitter.hasComplete()) { responseEmitter = PublishSubject.create(); } if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.updateCallbackEmitter(responseEmitter); okWebSocketClient.sendMessage(message); } else { super.sendMessage(message); } return responseEmitter; } }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public Observable<String> sendMessage(final String message) { if (!super.isConnected()) { return connectAndSendMessage(message); } else { if (responseEmitter == null || responseEmitter.hasComplete()) { responseEmitter = PublishSubject.create(); } if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.updateCallbackEmitter(responseEmitter); okWebSocketClient.sendMessage(message); } else { super.sendMessage(message); } return responseEmitter; } } }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public Observable<String> sendMessage(final String message) { if (!super.isConnected()) { return connectAndSendMessage(message); } else { if (responseEmitter == null || responseEmitter.hasComplete()) { responseEmitter = PublishSubject.create(); } if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.updateCallbackEmitter(responseEmitter); okWebSocketClient.sendMessage(message); } else { super.sendMessage(message); } return responseEmitter; } } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public Observable<String> sendMessage(final String message) { if (!super.isConnected()) { return connectAndSendMessage(message); } else { if (responseEmitter == null || responseEmitter.hasComplete()) { responseEmitter = PublishSubject.create(); } if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.updateCallbackEmitter(responseEmitter); okWebSocketClient.sendMessage(message); } else { super.sendMessage(message); } return responseEmitter; } } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); @Override Completable connect(); @Override boolean isConnected(); @Override Observable<String> sendMessage(final String message); @Override void closeConnection(); }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public Observable<String> sendMessage(final String message) { if (!super.isConnected()) { return connectAndSendMessage(message); } else { if (responseEmitter == null || responseEmitter.hasComplete()) { responseEmitter = PublishSubject.create(); } if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.updateCallbackEmitter(responseEmitter); okWebSocketClient.sendMessage(message); } else { super.sendMessage(message); } return responseEmitter; } } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); @Override Completable connect(); @Override boolean isConnected(); @Override Observable<String> sendMessage(final String message); @Override void closeConnection(); }
@Test public void willCloseWebSocket() throws RemoteException { setupMockBoundMessengerService(); createObservableSendDataAndSubscribe("lets connect"); sendConnectionParamsFromServer(); setWebSocketConnectedState(true); observableWebSocketClient.closeConnection(); verify(observableWebSocketClient.okWebSocketClient).close(); }
@Override public void closeConnection() { if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.close(); } super.closeConnection(); }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public void closeConnection() { if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.close(); } super.closeConnection(); } }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public void closeConnection() { if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.close(); } super.closeConnection(); } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public void closeConnection() { if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.close(); } super.closeConnection(); } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); @Override Completable connect(); @Override boolean isConnected(); @Override Observable<String> sendMessage(final String message); @Override void closeConnection(); }
ObservableWebSocketClient extends ObservableMessengerClient { @Override public void closeConnection() { if (okWebSocketClient != null && okWebSocketClient.isConnected()) { okWebSocketClient.close(); } super.closeConnection(); } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); @Override Completable connect(); @Override boolean isConnected(); @Override Observable<String> sendMessage(final String message); @Override void closeConnection(); }
@Test public void willSendExceptionMessage() throws RemoteException { setupReplyTo(); MessageException message = new MessageException("bleep", "bloop"); boolean sent = messengerChannelServer.send(message); assertThat(sent).isTrue(); verifySentMessage(MESSAGE_ERROR, message.toJson()); }
@Override public boolean send(MessageException error) { Bundle b = new Bundle(); b.putString(KEY_DATA_RESPONSE, error.toJson()); Message message = createMessage(b, MESSAGE_ERROR); return send(message); }
MessengerChannelServer extends BaseChannelServer { @Override public boolean send(MessageException error) { Bundle b = new Bundle(); b.putString(KEY_DATA_RESPONSE, error.toJson()); Message message = createMessage(b, MESSAGE_ERROR); return send(message); } }
MessengerChannelServer extends BaseChannelServer { @Override public boolean send(MessageException error) { Bundle b = new Bundle(); b.putString(KEY_DATA_RESPONSE, error.toJson()); Message message = createMessage(b, MESSAGE_ERROR); return send(message); } MessengerChannelServer(String serviceComponentName, String clientPackageName); }
MessengerChannelServer extends BaseChannelServer { @Override public boolean send(MessageException error) { Bundle b = new Bundle(); b.putString(KEY_DATA_RESPONSE, error.toJson()); Message message = createMessage(b, MESSAGE_ERROR); return send(message); } MessengerChannelServer(String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override void disposeClient(); @Override boolean send(MessageException error); @Override String getClientPackageName(); @Override boolean send(String senddata); @Override boolean sendEndStream(); }
MessengerChannelServer extends BaseChannelServer { @Override public boolean send(MessageException error) { Bundle b = new Bundle(); b.putString(KEY_DATA_RESPONSE, error.toJson()); Message message = createMessage(b, MESSAGE_ERROR); return send(message); } MessengerChannelServer(String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override void disposeClient(); @Override boolean send(MessageException error); @Override String getClientPackageName(); @Override boolean send(String senddata); @Override boolean sendEndStream(); }
@Test public void willSendEndMessage() throws RemoteException { setupReplyTo(); boolean sent = messengerChannelServer.sendEndStream(); assertThat(sent).isTrue(); verifySentMessage(MESSAGE_END_STREAM, null); }
@Override public boolean sendEndStream() { Message message = createMessage(null, MESSAGE_END_STREAM); closeClient(); return send(message); }
MessengerChannelServer extends BaseChannelServer { @Override public boolean sendEndStream() { Message message = createMessage(null, MESSAGE_END_STREAM); closeClient(); return send(message); } }
MessengerChannelServer extends BaseChannelServer { @Override public boolean sendEndStream() { Message message = createMessage(null, MESSAGE_END_STREAM); closeClient(); return send(message); } MessengerChannelServer(String serviceComponentName, String clientPackageName); }
MessengerChannelServer extends BaseChannelServer { @Override public boolean sendEndStream() { Message message = createMessage(null, MESSAGE_END_STREAM); closeClient(); return send(message); } MessengerChannelServer(String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override void disposeClient(); @Override boolean send(MessageException error); @Override String getClientPackageName(); @Override boolean send(String senddata); @Override boolean sendEndStream(); }
MessengerChannelServer extends BaseChannelServer { @Override public boolean sendEndStream() { Message message = createMessage(null, MESSAGE_END_STREAM); closeClient(); return send(message); } MessengerChannelServer(String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override void disposeClient(); @Override boolean send(MessageException error); @Override String getClientPackageName(); @Override boolean send(String senddata); @Override boolean sendEndStream(); }
@Test public void willStartServerOnFirstMessage() { setupWebserverConnectionNever(); sendFirstMessage(); verify(webSocketServer).startServer(); }
private void startServer() { setupSendQueue(); setupWebServer(); }
WebSocketChannelServer extends MessengerChannelServer { private void startServer() { setupSendQueue(); setupWebServer(); } }
WebSocketChannelServer extends MessengerChannelServer { private void startServer() { setupSendQueue(); setupWebServer(); } WebSocketChannelServer(Context context, String serviceComponentName, String clientPackageName); }
WebSocketChannelServer extends MessengerChannelServer { private void startServer() { setupSendQueue(); setupWebServer(); } WebSocketChannelServer(Context context, String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override boolean send(final String message); @Override boolean sendEndStream(); }
WebSocketChannelServer extends MessengerChannelServer { private void startServer() { setupSendQueue(); setupWebServer(); } WebSocketChannelServer(Context context, String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override boolean send(final String message); @Override boolean sendEndStream(); static final String CONNECT_PLEASE; static final String CLOSE_MESSAGE; }
@Test public void willDisconnectOnEndStream() { setupWebserverConnection(); sendFirstMessage(); webSocketChannelServer.sendEndStream(); verify(webSocketConnection).disconnect(); }
@Override public boolean sendEndStream() { disconnectedWithEndStreamCall = true; sendMessageQueue.onNext(CLOSE_MESSAGE); Observable.timer(WAIT_FOR_CLOSE_TIMEOUT, TimeUnit.SECONDS, getSendScheduler()) .subscribe(new Consumer<Long>() { @Override public void accept(Long aLong) throws Exception { if (!sendMessageQueue.hasComplete()) { sendMessageQueue.onComplete(); } } }); return true; }
WebSocketChannelServer extends MessengerChannelServer { @Override public boolean sendEndStream() { disconnectedWithEndStreamCall = true; sendMessageQueue.onNext(CLOSE_MESSAGE); Observable.timer(WAIT_FOR_CLOSE_TIMEOUT, TimeUnit.SECONDS, getSendScheduler()) .subscribe(new Consumer<Long>() { @Override public void accept(Long aLong) throws Exception { if (!sendMessageQueue.hasComplete()) { sendMessageQueue.onComplete(); } } }); return true; } }
WebSocketChannelServer extends MessengerChannelServer { @Override public boolean sendEndStream() { disconnectedWithEndStreamCall = true; sendMessageQueue.onNext(CLOSE_MESSAGE); Observable.timer(WAIT_FOR_CLOSE_TIMEOUT, TimeUnit.SECONDS, getSendScheduler()) .subscribe(new Consumer<Long>() { @Override public void accept(Long aLong) throws Exception { if (!sendMessageQueue.hasComplete()) { sendMessageQueue.onComplete(); } } }); return true; } WebSocketChannelServer(Context context, String serviceComponentName, String clientPackageName); }
WebSocketChannelServer extends MessengerChannelServer { @Override public boolean sendEndStream() { disconnectedWithEndStreamCall = true; sendMessageQueue.onNext(CLOSE_MESSAGE); Observable.timer(WAIT_FOR_CLOSE_TIMEOUT, TimeUnit.SECONDS, getSendScheduler()) .subscribe(new Consumer<Long>() { @Override public void accept(Long aLong) throws Exception { if (!sendMessageQueue.hasComplete()) { sendMessageQueue.onComplete(); } } }); return true; } WebSocketChannelServer(Context context, String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override boolean send(final String message); @Override boolean sendEndStream(); }
WebSocketChannelServer extends MessengerChannelServer { @Override public boolean sendEndStream() { disconnectedWithEndStreamCall = true; sendMessageQueue.onNext(CLOSE_MESSAGE); Observable.timer(WAIT_FOR_CLOSE_TIMEOUT, TimeUnit.SECONDS, getSendScheduler()) .subscribe(new Consumer<Long>() { @Override public void accept(Long aLong) throws Exception { if (!sendMessageQueue.hasComplete()) { sendMessageQueue.onComplete(); } } }); return true; } WebSocketChannelServer(Context context, String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override boolean send(final String message); @Override boolean sendEndStream(); static final String CONNECT_PLEASE; static final String CLOSE_MESSAGE; }
@Test public void canSendMessageToClient() throws IOException { setupWebserverConnection(); sendFirstMessage(); String msg = "Hello client, are you are ok?"; webSocketChannelServer.send(msg); verify(webSocketConnection).send(msg); }
@Override public boolean send(final String message) { if (webSocketConnection != null && webSocketConnection.isConnected()) { sendMessageQueue.onNext(message); return true; } else { return super.send(message); } }
WebSocketChannelServer extends MessengerChannelServer { @Override public boolean send(final String message) { if (webSocketConnection != null && webSocketConnection.isConnected()) { sendMessageQueue.onNext(message); return true; } else { return super.send(message); } } }
WebSocketChannelServer extends MessengerChannelServer { @Override public boolean send(final String message) { if (webSocketConnection != null && webSocketConnection.isConnected()) { sendMessageQueue.onNext(message); return true; } else { return super.send(message); } } WebSocketChannelServer(Context context, String serviceComponentName, String clientPackageName); }
WebSocketChannelServer extends MessengerChannelServer { @Override public boolean send(final String message) { if (webSocketConnection != null && webSocketConnection.isConnected()) { sendMessageQueue.onNext(message); return true; } else { return super.send(message); } } WebSocketChannelServer(Context context, String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override boolean send(final String message); @Override boolean sendEndStream(); }
WebSocketChannelServer extends MessengerChannelServer { @Override public boolean send(final String message) { if (webSocketConnection != null && webSocketConnection.isConnected()) { sendMessageQueue.onNext(message); return true; } else { return super.send(message); } } WebSocketChannelServer(Context context, String serviceComponentName, String clientPackageName); @Override void handleMessage(Message msg); @Override boolean send(final String message); @Override boolean sendEndStream(); static final String CONNECT_PLEASE; static final String CLOSE_MESSAGE; }
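The client's sendMessage and the server's send above share the same shape: prefer the WebSocket transport while it is connected, otherwise fall back to the Android Messenger channel (the super.send / super.sendMessage calls). A transport-agnostic sketch of that decision; the Transport interface and the class name here are hypothetical illustrations, not types from the SDK:

// Hypothetical transport-fallback sketch; Transport and FallbackSenderSketch
// are illustrative names, not part of the source.
public class FallbackSenderSketch {
    interface Transport {
        boolean isConnected();
        boolean send(String message);
    }

    static boolean send(Transport preferred, Transport fallback, String message) {
        // Mirror of the shape above: use the WebSocket while it is connected,
        // otherwise fall back to the Messenger channel.
        if (preferred != null && preferred.isConnected()) {
            return preferred.send(message);
        }
        return fallback.send(message);
    }

    public static void main(String[] args) {
        Transport messenger = new Transport() {
            public boolean isConnected() { return true; }
            public boolean send(String m) { System.out.println("messenger: " + m); return true; }
        };
        // No WebSocket connection available, so the message falls back to the messenger.
        send(null, messenger, "Stop messaging me");
    }
}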
@Test public void willSetupChannelOnFirstMessageIntent() { Intent intent = new Intent(); testAbstractChannelService.onBind(intent); assertThat(testAbstractChannelService.incomingHandler).isNotNull(); testAbstractChannelService.incomingHandler.handleMessage(setupEmptyMessage()); assertThat(testAbstractChannelService.channelServerMap).hasSize(1); assertThat(testAbstractChannelService.channelType).isEqualTo(CHANNEL_MESSENGER); }
@Override @NonNull public final IBinder onBind(Intent intent) { String clientId = getClientIdFromIntent(intent); String channelType = getChannelTypeFromIntent(intent); String clientPackageName = getClientPackageNameFromIntent(intent); Log.d(TAG, String.format("Bound to client %s channel type: %s", clientId, channelType)); return createServiceIncomingHandler(clientId, channelType, clientPackageName); }
AbstractChannelService extends Service { @Override @NonNull public final IBinder onBind(Intent intent) { String clientId = getClientIdFromIntent(intent); String channelType = getChannelTypeFromIntent(intent); String clientPackageName = getClientPackageNameFromIntent(intent); Log.d(TAG, String.format("Bound to client %s channel type: %s", clientId, channelType)); return createServiceIncomingHandler(clientId, channelType, clientPackageName); } }
AbstractChannelService extends Service { @Override @NonNull public final IBinder onBind(Intent intent) { String clientId = getClientIdFromIntent(intent); String channelType = getChannelTypeFromIntent(intent); String clientPackageName = getClientPackageNameFromIntent(intent); Log.d(TAG, String.format("Bound to client %s channel type: %s", clientId, channelType)); return createServiceIncomingHandler(clientId, channelType, clientPackageName); } }
AbstractChannelService extends Service { @Override @NonNull public final IBinder onBind(Intent intent) { String clientId = getClientIdFromIntent(intent); String channelType = getChannelTypeFromIntent(intent); String clientPackageName = getClientPackageNameFromIntent(intent); Log.d(TAG, String.format("Bound to client %s channel type: %s", clientId, channelType)); return createServiceIncomingHandler(clientId, channelType, clientPackageName); } @Override @NonNull final IBinder onBind(Intent intent); @NonNull IBinder createServiceIncomingHandler(String clientId, String channelType, String clientPackageName); void setStopSelfOnEndOfStream(boolean stopSelfOnEndOfStream); @Override boolean onUnbind(Intent intent); @Nullable ChannelServer getChannelServerForId(String clientMessageId); }
AbstractChannelService extends Service { @Override @NonNull public final IBinder onBind(Intent intent) { String clientId = getClientIdFromIntent(intent); String channelType = getChannelTypeFromIntent(intent); String clientPackageName = getClientPackageNameFromIntent(intent); Log.d(TAG, String.format("Bound to client %s channel type: %s", clientId, channelType)); return createServiceIncomingHandler(clientId, channelType, clientPackageName); } @Override @NonNull final IBinder onBind(Intent intent); @NonNull IBinder createServiceIncomingHandler(String clientId, String channelType, String clientPackageName); void setStopSelfOnEndOfStream(boolean stopSelfOnEndOfStream); @Override boolean onUnbind(Intent intent); @Nullable ChannelServer getChannelServerForId(String clientMessageId); }
@Test public void checkCloseConnectionWillUnbindService() throws RemoteException { setupMockBoundMessengerService(); TestObserver<String> obs = createObservableSendDataAndSubscribe(new DataObject()); DataObject response = new DataObject(); sendReply(response); observableMessengerClient.closeConnection(); obs.awaitDone(2000, TimeUnit.MILLISECONDS).assertNoErrors().assertComplete().assertValue(response.toJson()); verifyServiceIsUnbound(); }
public void closeConnection() { if (messengerConnection != null) { Log.d(TAG, "Closing connection with id: " + messengerConnection.getClientId()); try { context.unbindService(messengerConnection); } catch (Throwable t) { } messengerConnection = null; if (responseEmitter != null) { responseEmitter.onComplete(); responseEmitter = null; } } }
ObservableMessengerClient extends BaseChannelClient implements ChannelClient { public void closeConnection() { if (messengerConnection != null) { Log.d(TAG, "Closing connection with id: " + messengerConnection.getClientId()); try { context.unbindService(messengerConnection); } catch (Throwable t) { } messengerConnection = null; if (responseEmitter != null) { responseEmitter.onComplete(); responseEmitter = null; } } } }
ObservableMessengerClient extends BaseChannelClient implements ChannelClient { public void closeConnection() { if (messengerConnection != null) { Log.d(TAG, "Closing connection with id: " + messengerConnection.getClientId()); try { context.unbindService(messengerConnection); } catch (Throwable t) { } messengerConnection = null; if (responseEmitter != null) { responseEmitter.onComplete(); responseEmitter = null; } } } ObservableMessengerClient(Context context, ComponentName serviceComponentName); ObservableMessengerClient(Context context, ComponentName serviceComponentName, OnHandleMessageCallback onHandleMessageCallback); }
ObservableMessengerClient extends BaseChannelClient implements ChannelClient { public void closeConnection() { if (messengerConnection != null) { Log.d(TAG, "Closing connection with id: " + messengerConnection.getClientId()); try { context.unbindService(messengerConnection); } catch (Throwable t) { } messengerConnection = null; if (responseEmitter != null) { responseEmitter.onComplete(); responseEmitter = null; } } } ObservableMessengerClient(Context context, ComponentName serviceComponentName); ObservableMessengerClient(Context context, ComponentName serviceComponentName, OnHandleMessageCallback onHandleMessageCallback); boolean isConnected(); Completable connect(); Observable<String> sendMessage(final String requestData); void closeConnection(); }
ObservableMessengerClient extends BaseChannelClient implements ChannelClient { public void closeConnection() { if (messengerConnection != null) { Log.d(TAG, "Closing connection with id: " + messengerConnection.getClientId()); try { context.unbindService(messengerConnection); } catch (Throwable t) { } messengerConnection = null; if (responseEmitter != null) { responseEmitter.onComplete(); responseEmitter = null; } } } ObservableMessengerClient(Context context, ComponentName serviceComponentName); ObservableMessengerClient(Context context, ComponentName serviceComponentName, OnHandleMessageCallback onHandleMessageCallback); boolean isConnected(); Completable connect(); Observable<String> sendMessage(final String requestData); void closeConnection(); }
@Test public void checkWillUseDifferentClientIdsForEachConnection() throws Exception { setupMockBoundMessengerService(); DataObject msg = new DataObject(); createObservableSendDataAndSubscribe(msg); Bundle firstMessage = getReceivedBundle(0); String firstClientId = firstMessage.getString(KEY_CLIENT_ID); observableMessengerClient.closeConnection(); msg = new DataObject(); createObservableSendDataAndSubscribe(msg); Bundle secondMessage = getReceivedBundle(1); String secondClientId = secondMessage.getString(KEY_CLIENT_ID); assertThat(firstClientId).isNotEqualTo(secondClientId); }
public void closeConnection() { if (messengerConnection != null) { Log.d(TAG, "Closing connection with id: " + messengerConnection.getClientId()); try { context.unbindService(messengerConnection); } catch (Throwable t) { } messengerConnection = null; if (responseEmitter != null) { responseEmitter.onComplete(); responseEmitter = null; } } }
ObservableMessengerClient extends BaseChannelClient implements ChannelClient { public void closeConnection() { if (messengerConnection != null) { Log.d(TAG, "Closing connection with id: " + messengerConnection.getClientId()); try { context.unbindService(messengerConnection); } catch (Throwable t) { } messengerConnection = null; if (responseEmitter != null) { responseEmitter.onComplete(); responseEmitter = null; } } } }
ObservableMessengerClient extends BaseChannelClient implements ChannelClient { public void closeConnection() { if (messengerConnection != null) { Log.d(TAG, "Closing connection with id: " + messengerConnection.getClientId()); try { context.unbindService(messengerConnection); } catch (Throwable t) { } messengerConnection = null; if (responseEmitter != null) { responseEmitter.onComplete(); responseEmitter = null; } } } ObservableMessengerClient(Context context, ComponentName serviceComponentName); ObservableMessengerClient(Context context, ComponentName serviceComponentName, OnHandleMessageCallback onHandleMessageCallback); }
ObservableMessengerClient extends BaseChannelClient implements ChannelClient { public void closeConnection() { if (messengerConnection != null) { Log.d(TAG, "Closing connection with id: " + messengerConnection.getClientId()); try { context.unbindService(messengerConnection); } catch (Throwable t) { } messengerConnection = null; if (responseEmitter != null) { responseEmitter.onComplete(); responseEmitter = null; } } } ObservableMessengerClient(Context context, ComponentName serviceComponentName); ObservableMessengerClient(Context context, ComponentName serviceComponentName, OnHandleMessageCallback onHandleMessageCallback); boolean isConnected(); Completable connect(); Observable<String> sendMessage(final String requestData); void closeConnection(); }
ObservableMessengerClient extends BaseChannelClient implements ChannelClient { public void closeConnection() { if (messengerConnection != null) { Log.d(TAG, "Closing connection with id: " + messengerConnection.getClientId()); try { context.unbindService(messengerConnection); } catch (Throwable t) { } messengerConnection = null; if (responseEmitter != null) { responseEmitter.onComplete(); responseEmitter = null; } } } ObservableMessengerClient(Context context, ComponentName serviceComponentName); ObservableMessengerClient(Context context, ComponentName serviceComponentName, OnHandleMessageCallback onHandleMessageCallback); boolean isConnected(); Completable connect(); Observable<String> sendMessage(final String requestData); void closeConnection(); }
@Test public void checkBindIntentWillContainCorrectChannel() { Intent bindIntent = observableWebSocketClient.getServiceIntent("7878"); assertThat(bindIntent).isNotNull(); assertThat(bindIntent.hasExtra(KEY_CHANNEL_TYPE)).isTrue(); assertThat(bindIntent.getStringExtra(KEY_CHANNEL_TYPE)).isEqualTo(CHANNEL_WEBSOCKET); assertThat(bindIntent.hasExtra(KEY_CLIENT_ID)).isTrue(); assertThat(bindIntent.getStringExtra(KEY_CLIENT_ID)).isEqualTo("7878"); }
@NonNull protected Intent getServiceIntent(String clientId) { Intent intent = super.getServiceIntent(clientId); intent.putExtra(KEY_CHANNEL_TYPE, CHANNEL_WEBSOCKET); return intent; }
ObservableWebSocketClient extends ObservableMessengerClient { @NonNull protected Intent getServiceIntent(String clientId) { Intent intent = super.getServiceIntent(clientId); intent.putExtra(KEY_CHANNEL_TYPE, CHANNEL_WEBSOCKET); return intent; } }
ObservableWebSocketClient extends ObservableMessengerClient { @NonNull protected Intent getServiceIntent(String clientId) { Intent intent = super.getServiceIntent(clientId); intent.putExtra(KEY_CHANNEL_TYPE, CHANNEL_WEBSOCKET); return intent; } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); }
ObservableWebSocketClient extends ObservableMessengerClient { @NonNull protected Intent getServiceIntent(String clientId) { Intent intent = super.getServiceIntent(clientId); intent.putExtra(KEY_CHANNEL_TYPE, CHANNEL_WEBSOCKET); return intent; } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); @Override Completable connect(); @Override boolean isConnected(); @Override Observable<String> sendMessage(final String message); @Override void closeConnection(); }
ObservableWebSocketClient extends ObservableMessengerClient { @NonNull protected Intent getServiceIntent(String clientId) { Intent intent = super.getServiceIntent(clientId); intent.putExtra(KEY_CHANNEL_TYPE, CHANNEL_WEBSOCKET); return intent; } ObservableWebSocketClient(Context context, ComponentName serviceComponentName); @Override Completable connect(); @Override boolean isConnected(); @Override Observable<String> sendMessage(final String message); @Override void closeConnection(); }
@Test public void testDisplayName_Empty(){ BaseHumanName name = new BaseHumanName(); String displayName = name.getDisplayName(); assertThat(displayName, not(nullValue())); assertThat(displayName, equalTo("")); }
public String getDisplayName(){ List<String> names = Arrays.asList(getPrefix(), getGivenName(), getFamilyName(), getSuffix()); return names.stream().filter(Objects::nonNull).collect(Collectors.joining(" ")); }
BaseHumanName extends BaseResource { public String getDisplayName(){ List<String> names = Arrays.asList(getPrefix(), getGivenName(), getFamilyName(), getSuffix()); return names.stream().filter(Objects::nonNull).collect(Collectors.joining(" ")); } }
BaseHumanName extends BaseResource { public String getDisplayName(){ List<String> names = Arrays.asList(getPrefix(), getGivenName(), getFamilyName(), getSuffix()); return names.stream().filter(Objects::nonNull).collect(Collectors.joining(" ")); } }
BaseHumanName extends BaseResource { public String getDisplayName(){ List<String> names = Arrays.asList(getPrefix(), getGivenName(), getFamilyName(), getSuffix()); return names.stream().filter(Objects::nonNull).collect(Collectors.joining(" ")); } HumanName.NameUse getNameUse(); void setNameUse(HumanName.NameUse nameUse); String getPrefix(); void setPrefix(String prefix); String getGivenName(); void setGivenName(String givenName); String getFamilyName(); void setFamilyName(String familyName); String getSuffix(); void setSuffix(String suffix); String getDisplayName(); @Override Long getId(); }
BaseHumanName extends BaseResource { public String getDisplayName(){ List<String> names = Arrays.asList(getPrefix(), getGivenName(), getFamilyName(), getSuffix()); return names.stream().filter(Objects::nonNull).collect(Collectors.joining(" ")); } HumanName.NameUse getNameUse(); void setNameUse(HumanName.NameUse nameUse); String getPrefix(); void setPrefix(String prefix); String getGivenName(); void setGivenName(String givenName); String getFamilyName(); void setFamilyName(String familyName); String getSuffix(); void setSuffix(String suffix); String getDisplayName(); @Override Long getId(); }
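A standalone sketch of the null-filtering join inside getDisplayName(), matching testDisplayName_Empty(): nulls are dropped before joining, so an all-null name yields "" rather than a NullPointerException. Note that empty (non-null) parts are kept and would still contribute a separator space; the sample values below are illustrative only.

import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

public class DisplayNameSketch {
    public static void main(String[] args) {
        // prefix, given name, family name, suffix -- two parts missing
        List<String> names = Arrays.asList("Dr.", "Jane", null, null);
        String display = names.stream()
                .filter(Objects::nonNull)        // drop the missing parts
                .collect(Collectors.joining(" "));
        System.out.println(display);             // "Dr. Jane"

        // All-null input joins an empty stream, producing "".
        List<String> empty = Arrays.asList(null, null, null, null);
        System.out.println("[" + empty.stream()
                .filter(Objects::nonNull)
                .collect(Collectors.joining(" ")) + "]"); // "[]"
    }
}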
@Test public void testValidateUrl() { /* NOTE: the URL bodies after "//" were lost to comment-stripping in the source dump; the literals below are representative reconstructions, not the original test data. */ assertTrue(ValidateUtils.validateUrl("https://www.test.com")); assertTrue(ValidateUtils.validateUrl("http://www.test.com")); assertTrue(ValidateUtils.validateUrl("ftp://ftp.test.com")); assertTrue(ValidateUtils.validateUrl("file://localhost/tmp/test.txt")); assertTrue(ValidateUtils.validateUrl("https://test.com")); assertTrue(ValidateUtils.validateUrl("https://test.co.in")); assertTrue(ValidateUtils.validateUrl("https://www.test.com/path")); assertTrue(ValidateUtils.validateUrl("https://www.test.com/path?query=1")); assertTrue(ValidateUtils.validateUrl("https://www.test.com:8080")); assertTrue(ValidateUtils.validateUrl("https://www.test.com:8080/path")); assertTrue(ValidateUtils.validateUrl("https://www.test.com/path#fragment")); assertTrue(ValidateUtils.validateUrl("https://sub.domain.test.com")); assertTrue(ValidateUtils.validateUrl("https://test.com/a/b/c")); assertTrue(ValidateUtils.validateUrl("https://test.com/a-b_c")); assertTrue(ValidateUtils.validateUrl("https://123.test.com")); assertTrue(ValidateUtils.validateUrl("https://test.com/index.html")); assertFalse(ValidateUtils.validateUrl("")); assertFalse(ValidateUtils.validateUrl("ht://www.test.com")); assertFalse(ValidateUtils.validateUrl("http/www.test.com")); assertFalse(ValidateUtils.validateUrl("http:www.test.com")); assertFalse(ValidateUtils.validateUrl("http:/www.test.com")); assertFalse(ValidateUtils.validateUrl("http://")); assertFalse(ValidateUtils.validateUrl("http56://www.test.com")); assertFalse(ValidateUtils.validateUrl("http//www.test.com")); assertFalse(ValidateUtils.validateUrl("http//test.com")); assertFalse(ValidateUtils.validateUrl("httpstest.com")); assertFalse(ValidateUtils.validateUrl("test")); assertFalse(ValidateUtils.validateUrl("test.co.in")); assertFalse(ValidateUtils.validateUrl(".com")); }
public static boolean validateUrl(String urlStr) { Matcher matcher = VALID_URL_REGEX .matcher(urlStr); return matcher.find(); }
ValidateUtils { public static boolean validateUrl(String urlStr) { Matcher matcher = VALID_URL_REGEX .matcher(urlStr); return matcher.find(); } }
ValidateUtils { public static boolean validateUrl(String urlStr) { Matcher matcher = VALID_URL_REGEX .matcher(urlStr); return matcher.find(); } private ValidateUtils(); }
ValidateUtils { public static boolean validateUrl(String urlStr) { Matcher matcher = VALID_URL_REGEX .matcher(urlStr); return matcher.find(); } private ValidateUtils(); static boolean validateEmail(String emailStr); static boolean validateUrl(String urlStr); }
ValidateUtils { public static boolean validateUrl(String urlStr) { Matcher matcher = VALID_URL_REGEX .matcher(urlStr); return matcher.find(); } private ValidateUtils(); static boolean validateEmail(String emailStr); static boolean validateUrl(String urlStr); }
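VALID_URL_REGEX itself is not part of this row, so the pattern below is a plausible stand-in (an assumption, not the project's constant). It also shows why anchoring matters here: validateUrl() calls Matcher.find(), which matches anywhere in the input, so without ^ and $ any string merely containing a URL would pass.

import java.util.regex.Pattern;

public class UrlRegexSketch {
    // Hypothetical pattern for illustration only.
    private static final Pattern VALID_URL_REGEX = Pattern.compile(
            "^(https?|ftp|file)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$");

    public static void main(String[] args) {
        System.out.println(VALID_URL_REGEX.matcher("https://www.test.com").find()); // true
        System.out.println(VALID_URL_REGEX.matcher("http/www.test.com").find());    // false
        System.out.println(VALID_URL_REGEX.matcher(".com").find());                 // false
    }
}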
@Test public void testValidateEmail() { assertTrue(ValidateUtils.validateEmail("[email protected]")); assertTrue(ValidateUtils.validateEmail("[email protected]")); assertTrue(ValidateUtils.validateEmail("[email protected]")); assertTrue(ValidateUtils.validateEmail("[email protected]")); assertTrue(ValidateUtils.validateEmail("[email protected]")); assertTrue(ValidateUtils.validateEmail("[email protected]")); assertTrue(ValidateUtils.validateEmail("[email protected]")); assertTrue(ValidateUtils.validateEmail("[email protected]")); assertTrue(ValidateUtils.validateEmail("[email protected]")); assertTrue(ValidateUtils.validateEmail("[email protected]")); assertTrue(ValidateUtils.validateEmail("abc+-%[email protected]")); assertFalse(ValidateUtils.validateEmail("")); assertFalse(ValidateUtils.validateEmail("abcdefg.com")); assertFalse(ValidateUtils.validateEmail("[email protected]")); assertFalse(ValidateUtils.validateEmail("ab*[email protected]")); assertFalse(ValidateUtils.validateEmail("abcd")); assertFalse(ValidateUtils.validateEmail("abc@abc")); assertFalse(ValidateUtils.validateEmail("abc@@hjg.com")); assertFalse(ValidateUtils.validateEmail("[email protected]++-")); assertFalse(ValidateUtils.validateEmail("abc@ab/*c")); assertFalse(ValidateUtils.validateEmail("abc@ab/*c.com")); assertFalse(ValidateUtils.validateEmail("[email protected]")); assertFalse(ValidateUtils.validateEmail("[email protected]")); assertFalse(ValidateUtils.validateEmail("[email protected]")); }
public static boolean validateEmail(String emailStr) { Matcher matcher = VALID_EMAIL_ADDRESS_REGEX .matcher(emailStr); return matcher.find(); }
ValidateUtils { public static boolean validateEmail(String emailStr) { Matcher matcher = VALID_EMAIL_ADDRESS_REGEX .matcher(emailStr); return matcher.find(); } }
ValidateUtils { public static boolean validateEmail(String emailStr) { Matcher matcher = VALID_EMAIL_ADDRESS_REGEX .matcher(emailStr); return matcher.find(); } private ValidateUtils(); }
ValidateUtils { public static boolean validateEmail(String emailStr) { Matcher matcher = VALID_EMAIL_ADDRESS_REGEX .matcher(emailStr); return matcher.find(); } private ValidateUtils(); static boolean validateEmail(String emailStr); static boolean validateUrl(String urlStr); }
ValidateUtils { public static boolean validateEmail(String emailStr) { Matcher matcher = VALID_EMAIL_ADDRESS_REGEX .matcher(emailStr); return matcher.find(); } private ValidateUtils(); static boolean validateEmail(String emailStr); static boolean validateUrl(String urlStr); }
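Likewise, VALID_EMAIL_ADDRESS_REGEX is defined elsewhere; the anchored, case-insensitive pattern below is a common choice, offered purely as an assumption to show why find() still behaves like a full-string match here.

import java.util.regex.Pattern;

public class EmailRegexSketch {
    // Hypothetical pattern for illustration only.
    private static final Pattern VALID_EMAIL_ADDRESS_REGEX = Pattern.compile(
            "^[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,6}$", Pattern.CASE_INSENSITIVE);

    public static void main(String[] args) {
        System.out.println(VALID_EMAIL_ADDRESS_REGEX.matcher("jane.doe@example.com").find()); // true
        System.out.println(VALID_EMAIL_ADDRESS_REGEX.matcher("abc@abc").find());              // false
        System.out.println(VALID_EMAIL_ADDRESS_REGEX.matcher("abc@@hjg.com").find());         // false
    }
}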
@Test public void shouldReturnFireBaseLiveData() { FirebaseAuthLiveData expectedData = authRepository.getFirebaseAuthLiveData(); assertThat(expectedData, equalTo(firebaseAuthLiveData)); }
@NonNull public FirebaseAuthLiveData getFirebaseAuthLiveData() { return firebaseAuthLiveData; }
AuthRepository { @NonNull public FirebaseAuthLiveData getFirebaseAuthLiveData() { return firebaseAuthLiveData; } }
AuthRepository { @NonNull public FirebaseAuthLiveData getFirebaseAuthLiveData() { return firebaseAuthLiveData; } @Inject AuthRepository(FirebaseAuthLiveData firebaseAuthLiveData); }
AuthRepository { @NonNull public FirebaseAuthLiveData getFirebaseAuthLiveData() { return firebaseAuthLiveData; } @Inject AuthRepository(FirebaseAuthLiveData firebaseAuthLiveData); @NonNull FirebaseAuthLiveData getFirebaseAuthLiveData(); }
AuthRepository { @NonNull public FirebaseAuthLiveData getFirebaseAuthLiveData() { return firebaseAuthLiveData; } @Inject AuthRepository(FirebaseAuthLiveData firebaseAuthLiveData); @NonNull FirebaseAuthLiveData getFirebaseAuthLiveData(); }
@Test public void shouldSetUser() { profileRepository.setUser(MOCK_USER); Mockito.verify(databaseReference).setValue(MOCK_USER); }
public void setUser(User user) { databaseReference.setValue(user); }
ProfileRepository { public void setUser(User user) { databaseReference.setValue(user); } }
ProfileRepository { public void setUser(User user) { databaseReference.setValue(user); } @Inject ProfileRepository(DatabaseReference databaseReference, FirebaseAuth firebaseAuth); }
ProfileRepository { public void setUser(User user) { databaseReference.setValue(user); } @Inject ProfileRepository(DatabaseReference databaseReference, FirebaseAuth firebaseAuth); @NonNull LiveData<DataSnapshot> getUser(); void setUser(User user); void setField(UserFieldType userFieldType, String profileField); }
ProfileRepository { public void setUser(User user) { databaseReference.setValue(user); } @Inject ProfileRepository(DatabaseReference databaseReference, FirebaseAuth firebaseAuth); @NonNull LiveData<DataSnapshot> getUser(); void setUser(User user); void setField(UserFieldType userFieldType, String profileField); @VisibleForTesting public FirebaseDatabaseLiveData firebaseDatabaseLiveData; public DatabaseReference databaseReference; }
@Test public void shouldSetField() { Mockito.when(databaseReference.child(UserFieldType.USERNAME.key)) .thenReturn(databaseReference); profileRepository.setField(UserFieldType.USERNAME, "test_username"); Mockito.verify(databaseReference).setValue("test_username"); }
public void setField(UserFieldType userFieldType, String profileField) { databaseReference.child(userFieldType.key).setValue(profileField); }
ProfileRepository { public void setField(UserFieldType userFieldType, String profileField) { databaseReference.child(userFieldType.key).setValue(profileField); } }
ProfileRepository { public void setField(UserFieldType userFieldType, String profileField) { databaseReference.child(userFieldType.key).setValue(profileField); } @Inject ProfileRepository(DatabaseReference databaseReference, FirebaseAuth firebaseAuth); }
ProfileRepository { public void setField(UserFieldType userFieldType, String profileField) { databaseReference.child(userFieldType.key).setValue(profileField); } @Inject ProfileRepository(DatabaseReference databaseReference, FirebaseAuth firebaseAuth); @NonNull LiveData<DataSnapshot> getUser(); void setUser(User user); void setField(UserFieldType userFieldType, String profileField); }
ProfileRepository { public void setField(UserFieldType userFieldType, String profileField) { databaseReference.child(userFieldType.key).setValue(profileField); } @Inject ProfileRepository(DatabaseReference databaseReference, FirebaseAuth firebaseAuth); @NonNull LiveData<DataSnapshot> getUser(); void setUser(User user); void setField(UserFieldType userFieldType, String profileField); @VisibleForTesting public FirebaseDatabaseLiveData firebaseDatabaseLiveData; public DatabaseReference databaseReference; }
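The setField() cells rely on a per-field database node name exposed as UserFieldType.key. A minimal sketch of that enum shape; the constant set and key strings are assumptions for illustration only.

public enum UserFieldType {
    USERNAME("username"),
    EMAIL("email");

    // Child node name under the user's database reference.
    public final String key;

    UserFieldType(String key) {
        this.key = key;
    }
}
// Usage mirroring setField(): databaseReference.child(UserFieldType.USERNAME.key).setValue("test_username");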
@Test public void shouldReturnUserLiveData() { LiveData<DataSnapshot> userLiveData = profileRepository.getUser(); Assert.assertSame(userLiveData, profileRepository.firebaseDatabaseLiveData); }
@NonNull public LiveData<DataSnapshot> getUser() { return firebaseDatabaseLiveData; }
ProfileRepository { @NonNull public LiveData<DataSnapshot> getUser() { return firebaseDatabaseLiveData; } }
ProfileRepository { @NonNull public LiveData<DataSnapshot> getUser() { return firebaseDatabaseLiveData; } @Inject ProfileRepository(DatabaseReference databaseReference, FirebaseAuth firebaseAuth); }
ProfileRepository { @NonNull public LiveData<DataSnapshot> getUser() { return firebaseDatabaseLiveData; } @Inject ProfileRepository(DatabaseReference databaseReference, FirebaseAuth firebaseAuth); @NonNull LiveData<DataSnapshot> getUser(); void setUser(User user); void setField(UserFieldType userFieldType, String profileField); }
ProfileRepository { @NonNull public LiveData<DataSnapshot> getUser() { return firebaseDatabaseLiveData; } @Inject ProfileRepository(DatabaseReference databaseReference, FirebaseAuth firebaseAuth); @NonNull LiveData<DataSnapshot> getUser(); void setUser(User user); void setField(UserFieldType userFieldType, String profileField); @VisibleForTesting public FirebaseDatabaseLiveData firebaseDatabaseLiveData; public DatabaseReference databaseReference; }
@Test public void shouldNotLoadContests_when_reloadFalse_and_rateLimitFalse() { Mockito.when(repoListRateLimit.shouldFetch(CodingCalendarViewModel.CONTEST_VIEW_MODEL)) .thenReturn(false); codingCalendarViewModel.loadContests(false); Mockito.verify(codingCalendarRepository, Mockito.never()).fetchContests(Mockito.anyBoolean()); Mockito.verify(repoListRateLimit, Mockito.never()).refreshRateLimiter(CodingCalendarViewModel.CONTEST_VIEW_MODEL); }
protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } @Inject CodingCalendarViewModel(CodingCalendarRepository codingCalendarRepository, RateLimiter<String> repoListRateLimit); }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } @Inject CodingCalendarViewModel(CodingCalendarRepository codingCalendarRepository, RateLimiter<String> repoListRateLimit); void openContestDetails(Long id); }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } @Inject CodingCalendarViewModel(CodingCalendarRepository codingCalendarRepository, RateLimiter<String> repoListRateLimit); void openContestDetails(Long id); }
@Test public void shouldLoadContests_when_reloadTrue_and_rateLimitFalse() { codingCalendarViewModel.loadContests(true); Mockito.verify(codingCalendarRepository).fetchContests(true); Mockito.verify(repoListRateLimit).refreshRateLimiter(CodingCalendarViewModel.CONTEST_VIEW_MODEL); }
protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } @Inject CodingCalendarViewModel(CodingCalendarRepository codingCalendarRepository, RateLimiter<String> repoListRateLimit); }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } @Inject CodingCalendarViewModel(CodingCalendarRepository codingCalendarRepository, RateLimiter<String> repoListRateLimit); void openContestDetails(Long id); }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } @Inject CodingCalendarViewModel(CodingCalendarRepository codingCalendarRepository, RateLimiter<String> repoListRateLimit); void openContestDetails(Long id); }
@Test public void shouldLoadContests_when_reloadFalse_and_rateLimitTrue() { Mockito.when(repoListRateLimit.shouldFetch(CodingCalendarViewModel.CONTEST_VIEW_MODEL)) .thenReturn(true); codingCalendarViewModel.loadContests(false); Mockito.verify(codingCalendarRepository).fetchContests(false); Mockito.verify(repoListRateLimit).refreshRateLimiter(CodingCalendarViewModel.CONTEST_VIEW_MODEL); }
protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } @Inject CodingCalendarViewModel(CodingCalendarRepository codingCalendarRepository, RateLimiter<String> repoListRateLimit); }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } @Inject CodingCalendarViewModel(CodingCalendarRepository codingCalendarRepository, RateLimiter<String> repoListRateLimit); void openContestDetails(Long id); }
CodingCalendarViewModel extends ViewModel { protected LiveData<Resource<List<Contest>>> loadContests(boolean reload) { if (reload || repoListRateLimit.shouldFetch(CONTEST_VIEW_MODEL)) { contests = codingCalendarRepository.fetchContests(reload); repoListRateLimit.refreshRateLimiter(CONTEST_VIEW_MODEL); } return contests; } @Inject CodingCalendarViewModel(CodingCalendarRepository codingCalendarRepository, RateLimiter<String> repoListRateLimit); void openContestDetails(Long id); }
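All three loadContests() tests hinge on the rate limiter's two calls, shouldFetch(key) and refreshRateLimiter(key). A minimal sketch with those exact entry points; the timestamp-based staleness policy is an assumption, not necessarily the app's implementation.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

public class RateLimiter<KEY> {
    private final Map<KEY, Long> lastFetched = new ConcurrentHashMap<>();
    private final long timeoutMs;

    public RateLimiter(long timeout, TimeUnit unit) {
        this.timeoutMs = unit.toMillis(timeout);
    }

    // True when the key was never fetched or its data has gone stale.
    public boolean shouldFetch(KEY key) {
        Long last = lastFetched.get(key);
        return last == null || System.currentTimeMillis() - last > timeoutMs;
    }

    // Record a fresh fetch so subsequent calls within the timeout return false.
    public void refreshRateLimiter(KEY key) {
        lastFetched.put(key, System.currentTimeMillis());
    }
}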
@Test public void testEarliestWindow() { MetricSampleAggregator<String, IntegerEntity> aggregator = new MetricSampleAggregator<>(NUM_WINDOWS, WINDOW_MS, MIN_SAMPLES_PER_WINDOW, 0, _metricDef); assertNull(aggregator.earliestWindow()); CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY1, 0, WINDOW_MS, _metricDef); assertEquals(WINDOW_MS, aggregator.earliestWindow().longValue()); CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY1, NUM_WINDOWS, WINDOW_MS, _metricDef); assertEquals(2 * WINDOW_MS, aggregator.earliestWindow().longValue()); }
public Long earliestWindow() { return _rawMetrics.isEmpty() ? null : _oldestWindowIndex * _windowMs; }
MetricSampleAggregator extends LongGenerationed { public Long earliestWindow() { return _rawMetrics.isEmpty() ? null : _oldestWindowIndex * _windowMs; } }
MetricSampleAggregator extends LongGenerationed { public Long earliestWindow() { return _rawMetrics.isEmpty() ? null : _oldestWindowIndex * _windowMs; } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); }
MetricSampleAggregator extends LongGenerationed { public Long earliestWindow() { return _rawMetrics.isEmpty() ? null : _oldestWindowIndex * _windowMs; } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
MetricSampleAggregator extends LongGenerationed { public Long earliestWindow() { return _rawMetrics.isEmpty() ? null : _oldestWindowIndex * _windowMs; } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
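A worked example of the arithmetic in earliestWindow(), consistent with testEarliestWindow(): windows appear to be labeled by their end time, so the first populated window has index 1 and earliestWindow() returns 1 * WINDOW_MS; after two extra windows roll past capacity the oldest index advances to 2. The labeling convention is inferred from the assertions, not stated in the row.

public class EarliestWindowSketch {
    public static void main(String[] args) {
        long windowMs = 1000L;          // stand-in for WINDOW_MS
        long oldestWindowIndex = 1;     // first populated window
        System.out.println(oldestWindowIndex * windowMs); // 1000 == WINDOW_MS

        oldestWindowIndex = 2;          // after two extra windows roll past capacity
        System.out.println(oldestWindowIndex * windowMs); // 2000 == 2 * WINDOW_MS
    }
}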
@Test public void testAggregationOption7() { MetricSampleAggregator<String, IntegerEntity> aggregator = prepareCompletenessTestEnv(); AggregationOptions<String, IntegerEntity> options = new AggregationOptions<>(0.5, 0.0, NUM_WINDOWS, 1, new HashSet<>(Arrays.asList(ENTITY1, ENTITY2, ENTITY3)), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, IntegerEntity> completeness = aggregator.completeness(-1, Long.MAX_VALUE, options); assertEquals(16, completeness.validWindowIndices().size()); assertFalse(completeness.validWindowIndices().contains(3L)); assertFalse(completeness.validWindowIndices().contains(4L)); assertFalse(completeness.validWindowIndices().contains(20L)); assertFalse(completeness.validWindowIndices().contains(11L)); assertEquals(2, completeness.validEntities().size()); assertTrue(completeness.validEntities().contains(ENTITY1)); assertTrue(completeness.validEntities().contains(ENTITY3)); assertCompletenessByWindowIndex(completeness, Collections.singleton(11L)); assertEquals(1.0f / 3, completeness.extrapolatedEntitiesByWindowIndex().get(11L).doubleValue(), EPSILON); assertEquals(1.0f / 3, completeness.validEntityRatioByWindowIndex().get(11L), EPSILON); assertEquals(1.0f / 3, completeness.validEntityRatioWithGroupGranularityByWindowIndex().get(11L), EPSILON); assertEquals(0.5, completeness.validEntityGroupRatioByWindowIndex().get(11L), EPSILON); }
public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
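A standalone sketch of the range clamping at the top of completeness(): out-of-range from/to values (testAggregationOption7 passes -1 and Long.MAX_VALUE) are pulled back into the retained window range before the aggregator state is queried. The time-to-index mapping below is an assumption consistent with the end-time labeling noted above.

public class WindowClampSketch {
    static final long WINDOW_MS = 1000L;

    // Assumed mapping: a timestamp belongs to the window labeled by its end time.
    static long windowIndex(long time) {
        return time / WINDOW_MS + 1;
    }

    public static void main(String[] args) {
        long oldestWindowIndex = 5;
        long currentWindowIndex = 25;

        long fromWindowIndex = Math.max(windowIndex(-1L), oldestWindowIndex);
        long toWindowIndex = Math.min(windowIndex(Long.MAX_VALUE), currentWindowIndex - 1);
        System.out.println(fromWindowIndex + ".." + toWindowIndex); // 5..24
    }
}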
@Test public void testPeekCurrentWindow() { MetricSampleAggregator<String, IntegerEntity> aggregator = new MetricSampleAggregator<>(NUM_WINDOWS, WINDOW_MS, MIN_SAMPLES_PER_WINDOW, 0, _metricDef); populateSampleAggregator(2, 1, aggregator, ENTITY1); populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY2); CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY3, 0, WINDOW_MS, _metricDef); Map<IntegerEntity, ValuesAndExtrapolations> currentWindowMetrics = aggregator.peekCurrentWindow(); assertEquals(FORCED_INSUFFICIENT, currentWindowMetrics.get(ENTITY1).extrapolations().get(0)); assertTrue(currentWindowMetrics.get(ENTITY2).extrapolations().isEmpty()); assertEquals(NO_VALID_EXTRAPOLATION, currentWindowMetrics.get(ENTITY3).extrapolations().get(0)); }
public Map<E, ValuesAndExtrapolations> peekCurrentWindow() { _windowRollingLock.lock(); try { Map<E, ValuesAndExtrapolations> result = new HashMap<>(); _rawMetrics.forEach((entity, rawMetric) -> { ValuesAndExtrapolations vae = rawMetric.peekCurrentWindow(_currentWindowIndex, _metricDef); SortedSet<Long> currentWindows = new TreeSet<>(Collections.singleton(_currentWindowIndex)); vae.setWindows(toWindows(currentWindows)); result.put(entity, vae); }); return result; } finally { _windowRollingLock.unlock(); } }
MetricSampleAggregator extends LongGenerationed { public Map<E, ValuesAndExtrapolations> peekCurrentWindow() { _windowRollingLock.lock(); try { Map<E, ValuesAndExtrapolations> result = new HashMap<>(); _rawMetrics.forEach((entity, rawMetric) -> { ValuesAndExtrapolations vae = rawMetric.peekCurrentWindow(_currentWindowIndex, _metricDef); SortedSet<Long> currentWindows = new TreeSet<>(Collections.singleton(_currentWindowIndex)); vae.setWindows(toWindows(currentWindows)); result.put(entity, vae); }); return result; } finally { _windowRollingLock.unlock(); } } }
MetricSampleAggregator extends LongGenerationed { public Map<E, ValuesAndExtrapolations> peekCurrentWindow() { _windowRollingLock.lock(); try { Map<E, ValuesAndExtrapolations> result = new HashMap<>(); _rawMetrics.forEach((entity, rawMetric) -> { ValuesAndExtrapolations vae = rawMetric.peekCurrentWindow(_currentWindowIndex, _metricDef); SortedSet<Long> currentWindows = new TreeSet<>(Collections.singleton(_currentWindowIndex)); vae.setWindows(toWindows(currentWindows)); result.put(entity, vae); }); return result; } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); }
MetricSampleAggregator extends LongGenerationed { public Map<E, ValuesAndExtrapolations> peekCurrentWindow() { _windowRollingLock.lock(); try { Map<E, ValuesAndExtrapolations> result = new HashMap<>(); _rawMetrics.forEach((entity, rawMetric) -> { ValuesAndExtrapolations vae = rawMetric.peekCurrentWindow(_currentWindowIndex, _metricDef); SortedSet<Long> currentWindows = new TreeSet<>(Collections.singleton(_currentWindowIndex)); vae.setWindows(toWindows(currentWindows)); result.put(entity, vae); }); return result; } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
MetricSampleAggregator extends LongGenerationed { public Map<E, ValuesAndExtrapolations> peekCurrentWindow() { _windowRollingLock.lock(); try { Map<E, ValuesAndExtrapolations> result = new HashMap<>(); _rawMetrics.forEach((entity, rawMetric) -> { ValuesAndExtrapolations vae = rawMetric.peekCurrentWindow(_currentWindowIndex, _metricDef); SortedSet<Long> currentWindows = new TreeSet<>(Collections.singleton(_currentWindowIndex)); vae.setWindows(toWindows(currentWindows)); result.put(entity, vae); }); return result; } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
@Test public void testAdd() { Map<Short, MetricValues> valuesByMetricId = getValuesByMetricId(); AggregatedMetricValues aggregatedMetricValues = new AggregatedMetricValues(valuesByMetricId); aggregatedMetricValues.add(aggregatedMetricValues); for (Map.Entry<Short, MetricValues> entry : valuesByMetricId.entrySet()) { MetricValues values = entry.getValue(); for (int j = 0; j < 10; j++) { assertEquals(2 * j, values.get(j), 0.01); } } }
public void add(short metricId, MetricValues metricValuesToAdd) { if (metricValuesToAdd == null) { throw new IllegalArgumentException("The metric values to be added cannot be null"); } if (!_metricValues.isEmpty() && metricValuesToAdd.length() != length()) { throw new IllegalArgumentException("The existing metric length is " + length() + " which is different from the" + " metric length of " + metricValuesToAdd.length() + " that is being added."); } MetricValues metricValues = _metricValues.computeIfAbsent(metricId, id -> new MetricValues(metricValuesToAdd.length())); metricValues.add(metricValuesToAdd); }
AggregatedMetricValues { public void add(short metricId, MetricValues metricValuesToAdd) { if (metricValuesToAdd == null) { throw new IllegalArgumentException("The metric values to be added cannot be null"); } if (!_metricValues.isEmpty() && metricValuesToAdd.length() != length()) { throw new IllegalArgumentException("The existing metric length is " + length() + " which is different from the" + " metric length of " + metricValuesToAdd.length() + " that is being added."); } MetricValues metricValues = _metricValues.computeIfAbsent(metricId, id -> new MetricValues(metricValuesToAdd.length())); metricValues.add(metricValuesToAdd); } }
AggregatedMetricValues { public void add(short metricId, MetricValues metricValuesToAdd) { if (metricValuesToAdd == null) { throw new IllegalArgumentException("The metric values to be added cannot be null"); } if (!_metricValues.isEmpty() && metricValuesToAdd.length() != length()) { throw new IllegalArgumentException("The existing metric length is " + length() + " which is different from the" + " metric length of " + metricValuesToAdd.length() + " that is being added."); } MetricValues metricValues = _metricValues.computeIfAbsent(metricId, id -> new MetricValues(metricValuesToAdd.length())); metricValues.add(metricValuesToAdd); } AggregatedMetricValues(); AggregatedMetricValues(Map<Short, MetricValues> valuesByMetricId); }
AggregatedMetricValues { public void add(short metricId, MetricValues metricValuesToAdd) { if (metricValuesToAdd == null) { throw new IllegalArgumentException("The metric values to be added cannot be null"); } if (!_metricValues.isEmpty() && metricValuesToAdd.length() != length()) { throw new IllegalArgumentException("The existing metric length is " + length() + " which is different from the" + " metric length of " + metricValuesToAdd.length() + " that is being added."); } MetricValues metricValues = _metricValues.computeIfAbsent(metricId, id -> new MetricValues(metricValuesToAdd.length())); metricValues.add(metricValuesToAdd); } AggregatedMetricValues(); AggregatedMetricValues(Map<Short, MetricValues> valuesByMetricId); MetricValues valuesFor(short metricId); AggregatedMetricValues valuesFor(Collection<Short> metricIds, boolean shareValueArray); MetricValues valuesForGroup(String group, MetricDef metricDef, boolean shareValueArray); int length(); boolean isEmpty(); Set<Short> metricIds(); void add(short metricId, MetricValues metricValuesToAdd); void add(AggregatedMetricValues other); void subtract(AggregatedMetricValues other); void clear(); void writeTo(OutputStream out); @Override String toString(); }
AggregatedMetricValues { public void add(short metricId, MetricValues metricValuesToAdd) { if (metricValuesToAdd == null) { throw new IllegalArgumentException("The metric values to be added cannot be null"); } if (!_metricValues.isEmpty() && metricValuesToAdd.length() != length()) { throw new IllegalArgumentException("The existing metric length is " + length() + " which is different from the" + " metric length of " + metricValuesToAdd.length() + " that is being added."); } MetricValues metricValues = _metricValues.computeIfAbsent(metricId, id -> new MetricValues(metricValuesToAdd.length())); metricValues.add(metricValuesToAdd); } AggregatedMetricValues(); AggregatedMetricValues(Map<Short, MetricValues> valuesByMetricId); MetricValues valuesFor(short metricId); AggregatedMetricValues valuesFor(Collection<Short> metricIds, boolean shareValueArray); MetricValues valuesForGroup(String group, MetricDef metricDef, boolean shareValueArray); int length(); boolean isEmpty(); Set<Short> metricIds(); void add(short metricId, MetricValues metricValuesToAdd); void add(AggregatedMetricValues other); void subtract(AggregatedMetricValues other); void clear(); void writeTo(OutputStream out); @Override String toString(); }
@Test public void testDeduct() { Map<Short, MetricValues> valuesByMetricId = getValuesByMetricId(); AggregatedMetricValues aggregatedMetricValues = new AggregatedMetricValues(valuesByMetricId); aggregatedMetricValues.subtract(aggregatedMetricValues); for (Map.Entry<Short, MetricValues> entry : valuesByMetricId.entrySet()) { MetricValues values = entry.getValue(); for (int j = 0; j < 10; j++) { assertEquals(0, values.get(j), 0.01); } } }
public void subtract(AggregatedMetricValues other) { for (Map.Entry<Short, MetricValues> entry : other.metricValues().entrySet()) { short metricId = entry.getKey(); MetricValues otherValuesForMetric = entry.getValue(); MetricValues valuesForMetric = valuesFor(metricId); if (valuesForMetric == null) { throw new IllegalStateException("Cannot subtract values from a non-existing MetricValues"); } if (valuesForMetric.length() != otherValuesForMetric.length()) { throw new IllegalStateException("The two values arrays have different lengths " + valuesForMetric.length() + " and " + otherValuesForMetric.length()); } valuesForMetric.subtract(otherValuesForMetric); } }
AggregatedMetricValues { public void subtract(AggregatedMetricValues other) { for (Map.Entry<Short, MetricValues> entry : other.metricValues().entrySet()) { short metricId = entry.getKey(); MetricValues otherValuesForMetric = entry.getValue(); MetricValues valuesForMetric = valuesFor(metricId); if (valuesForMetric == null) { throw new IllegalStateException("Cannot subtract values from a non-existing MetricValues"); } if (valuesForMetric.length() != otherValuesForMetric.length()) { throw new IllegalStateException("The two values arrays have different lengths " + valuesForMetric.length() + " and " + otherValuesForMetric.length()); } valuesForMetric.subtract(otherValuesForMetric); } } }
AggregatedMetricValues { public void subtract(AggregatedMetricValues other) { for (Map.Entry<Short, MetricValues> entry : other.metricValues().entrySet()) { short metricId = entry.getKey(); MetricValues otherValuesForMetric = entry.getValue(); MetricValues valuesForMetric = valuesFor(metricId); if (valuesForMetric == null) { throw new IllegalStateException("Cannot subtract values from a non-existing MetricValues"); } if (valuesForMetric.length() != otherValuesForMetric.length()) { throw new IllegalStateException("The two values arrays have different lengths " + valuesForMetric.length() + " and " + otherValuesForMetric.length()); } valuesForMetric.subtract(otherValuesForMetric); } } AggregatedMetricValues(); AggregatedMetricValues(Map<Short, MetricValues> valuesByMetricId); }
AggregatedMetricValues { public void subtract(AggregatedMetricValues other) { for (Map.Entry<Short, MetricValues> entry : other.metricValues().entrySet()) { short metricId = entry.getKey(); MetricValues otherValuesForMetric = entry.getValue(); MetricValues valuesForMetric = valuesFor(metricId); if (valuesForMetric == null) { throw new IllegalStateException("Cannot subtract values from a non-existing MetricValues"); } if (valuesForMetric.length() != otherValuesForMetric.length()) { throw new IllegalStateException("The two values arrays have different lengths " + valuesForMetric.length() + " and " + otherValuesForMetric.length()); } valuesForMetric.subtract(otherValuesForMetric); } } AggregatedMetricValues(); AggregatedMetricValues(Map<Short, MetricValues> valuesByMetricId); MetricValues valuesFor(short metricId); AggregatedMetricValues valuesFor(Collection<Short> metricIds, boolean shareValueArray); MetricValues valuesForGroup(String group, MetricDef metricDef, boolean shareValueArray); int length(); boolean isEmpty(); Set<Short> metricIds(); void add(short metricId, MetricValues metricValuesToAdd); void add(AggregatedMetricValues other); void subtract(AggregatedMetricValues other); void clear(); void writeTo(OutputStream out); @Override String toString(); }
AggregatedMetricValues { public void subtract(AggregatedMetricValues other) { for (Map.Entry<Short, MetricValues> entry : other.metricValues().entrySet()) { short metricId = entry.getKey(); MetricValues otherValuesForMetric = entry.getValue(); MetricValues valuesForMetric = valuesFor(metricId); if (valuesForMetric == null) { throw new IllegalStateException("Cannot subtract values from a non-existing MetricValues"); } if (valuesForMetric.length() != otherValuesForMetric.length()) { throw new IllegalStateException("The two values arrays have different lengths " + valuesForMetric.length() + " and " + otherValuesForMetric.length()); } valuesForMetric.subtract(otherValuesForMetric); } } AggregatedMetricValues(); AggregatedMetricValues(Map<Short, MetricValues> valuesByMetricId); MetricValues valuesFor(short metricId); AggregatedMetricValues valuesFor(Collection<Short> metricIds, boolean shareValueArray); MetricValues valuesForGroup(String group, MetricDef metricDef, boolean shareValueArray); int length(); boolean isEmpty(); Set<Short> metricIds(); void add(short metricId, MetricValues metricValuesToAdd); void add(AggregatedMetricValues other); void subtract(AggregatedMetricValues other); void clear(); void writeTo(OutputStream out); @Override String toString(); }
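add() and subtract() both delegate to element-wise MetricValues operations over equal-length windows; the length guards above protect that invariant. MetricValues itself is not shown in these rows, so this is an illustrative stand-in reproducing the behavior testAdd() and testDeduct() assert.

public class MetricValuesSketch {
    private final double[] values;

    MetricValuesSketch(double... values) {
        this.values = values;
    }

    double get(int index) {
        return values[index];
    }

    void add(MetricValuesSketch other) {
        for (int i = 0; i < values.length; i++) {
            values[i] += other.values[i];
        }
    }

    void subtract(MetricValuesSketch other) {
        for (int i = 0; i < values.length; i++) {
            values[i] -= other.values[i];
        }
    }

    public static void main(String[] args) {
        MetricValuesSketch v = new MetricValuesSketch(1, 2, 3);
        v.add(v);                     // self-add doubles every entry, as in testAdd()
        System.out.println(v.get(1)); // 4.0
        v.subtract(v);                // self-subtract zeroes every entry, as in testDeduct()
        System.out.println(v.get(1)); // 0.0
    }
}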
@Test public void testExtrapolationAdjacentAvgAtMiddle() { RawMetricValues rawValues = new RawMetricValues(NUM_WINDOWS_TO_KEEP, MIN_SAMPLES_PER_WINDOW, NUM_RAW_METRICS); prepareWindowMissingAtIndex(rawValues, 1); ValuesAndExtrapolations valuesAndExtrapolations = aggregate(rawValues, allWindowIndices(0)); assertEquals(11.5, valuesAndExtrapolations.metricValues().valuesFor((short) 0).get(1), EPSILON); assertEquals(13.0, valuesAndExtrapolations.metricValues().valuesFor((short) 1).get(1), EPSILON); assertEquals(13.0, valuesAndExtrapolations.metricValues().valuesFor((short) 2).get(1), EPSILON); assertEquals(1, valuesAndExtrapolations.extrapolations().size()); Assert.assertEquals(Extrapolation.AVG_ADJACENT, valuesAndExtrapolations.extrapolations().get(1)); }
public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) { return aggregate(windowIndices, metricDef, true); }
RawMetricValues extends WindowIndexedArrays { public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) { return aggregate(windowIndices, metricDef, true); } }
RawMetricValues extends WindowIndexedArrays { public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) { return aggregate(windowIndices, metricDef, true); } RawMetricValues(int numWindowsToKeep, byte minSamplesPerWindow, int numMetricTypesInSample); }
RawMetricValues extends WindowIndexedArrays { public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) { return aggregate(windowIndices, metricDef, true); } RawMetricValues(int numWindowsToKeep, byte minSamplesPerWindow, int numMetricTypesInSample); synchronized void addSample(MetricSample<?, ?> sample, long windowIndex, MetricDef metricDef); synchronized void updateOldestWindowIndex(long newOldestWindowIndex); synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation); synchronized int numWindowsWithExtrapolation(); synchronized boolean isValidAtWindowIndex(long windowIndex); synchronized boolean isExtrapolatedAtWindowIndex(long windowIndex); synchronized byte sampleCountsAtWindowIndex(long windowIndex); synchronized void sanityCheckWindowIndex(long windowIndex); synchronized void sanityCheckWindowRangeReset(long startingWindowIndex, int numWindowIndicesToReset); synchronized int resetWindowIndices(long startingWindowIndex, int numWindowIndicesToReset); synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef); synchronized ValuesAndExtrapolations peekCurrentWindow(long currentWindowIndex, MetricDef metricDef); synchronized int numSamples(); }
RawMetricValues extends WindowIndexedArrays { public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) { return aggregate(windowIndices, metricDef, true); } RawMetricValues(int numWindowsToKeep, byte minSamplesPerWindow, int numMetricTypesInSample); synchronized void addSample(MetricSample<?, ?> sample, long windowIndex, MetricDef metricDef); synchronized void updateOldestWindowIndex(long newOldestWindowIndex); synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation); synchronized int numWindowsWithExtrapolation(); synchronized boolean isValidAtWindowIndex(long windowIndex); synchronized boolean isExtrapolatedAtWindowIndex(long windowIndex); synchronized byte sampleCountsAtWindowIndex(long windowIndex); synchronized void sanityCheckWindowIndex(long windowIndex); synchronized void sanityCheckWindowRangeReset(long startingWindowIndex, int numWindowIndicesToReset); synchronized int resetWindowIndices(long startingWindowIndex, int numWindowIndicesToReset); synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef); synchronized ValuesAndExtrapolations peekCurrentWindow(long currentWindowIndex, MetricDef metricDef); synchronized int numSamples(); }
@Test public void testExtrapolationAdjacentAvgAtLeftEdge() { RawMetricValues rawValues = new RawMetricValues(NUM_WINDOWS_TO_KEEP, MIN_SAMPLES_PER_WINDOW, NUM_RAW_METRICS); prepareWindowMissingAtIndex(rawValues, 0); ValuesAndExtrapolations valuesAndExtrapolations = aggregate(rawValues, allWindowIndices(0)); assertEquals(0, valuesAndExtrapolations.metricValues().valuesFor((short) 0).get(0), EPSILON); assertEquals(0, valuesAndExtrapolations.metricValues().valuesFor((short) 1).get(0), EPSILON); assertEquals(0, valuesAndExtrapolations.metricValues().valuesFor((short) 2).get(0), EPSILON); assertEquals(1, valuesAndExtrapolations.extrapolations().size()); Assert.assertEquals(Extrapolation.NO_VALID_EXTRAPOLATION, valuesAndExtrapolations.extrapolations().get(0)); }
public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) { return aggregate(windowIndices, metricDef, true); }
RawMetricValues extends WindowIndexedArrays { public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) { return aggregate(windowIndices, metricDef, true); } }
RawMetricValues extends WindowIndexedArrays { public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) { return aggregate(windowIndices, metricDef, true); } RawMetricValues(int numWindowsToKeep, byte minSamplesPerWindow, int numMetricTypesInSample); }
RawMetricValues extends WindowIndexedArrays { public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) { return aggregate(windowIndices, metricDef, true); } RawMetricValues(int numWindowsToKeep, byte minSamplesPerWindow, int numMetricTypesInSample); synchronized void addSample(MetricSample<?, ?> sample, long windowIndex, MetricDef metricDef); synchronized void updateOldestWindowIndex(long newOldestWindowIndex); synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation); synchronized int numWindowsWithExtrapolation(); synchronized boolean isValidAtWindowIndex(long windowIndex); synchronized boolean isExtrapolatedAtWindowIndex(long windowIndex); synchronized byte sampleCountsAtWindowIndex(long windowIndex); synchronized void sanityCheckWindowIndex(long windowIndex); synchronized void sanityCheckWindowRangeReset(long startingWindowIndex, int numWindowIndicesToReset); synchronized int resetWindowIndices(long startingWindowIndex, int numWindowIndicesToReset); synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef); synchronized ValuesAndExtrapolations peekCurrentWindow(long currentWindowIndex, MetricDef metricDef); synchronized int numSamples(); }
RawMetricValues extends WindowIndexedArrays { public synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef) { return aggregate(windowIndices, metricDef, true); } RawMetricValues(int numWindowsToKeep, byte minSamplesPerWindow, int numMetricTypesInSample); synchronized void addSample(MetricSample<?, ?> sample, long windowIndex, MetricDef metricDef); synchronized void updateOldestWindowIndex(long newOldestWindowIndex); synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation); synchronized int numWindowsWithExtrapolation(); synchronized boolean isValidAtWindowIndex(long windowIndex); synchronized boolean isExtrapolatedAtWindowIndex(long windowIndex); synchronized byte sampleCountsAtWindowIndex(long windowIndex); synchronized void sanityCheckWindowIndex(long windowIndex); synchronized void sanityCheckWindowRangeReset(long startingWindowIndex, int numWindowIndicesToReset); synchronized int resetWindowIndices(long startingWindowIndex, int numWindowIndicesToReset); synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef); synchronized ValuesAndExtrapolations peekCurrentWindow(long currentWindowIndex, MetricDef metricDef); synchronized int numSamples(); }
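The arithmetic behind the AVG_ADJACENT assertion above: a window missing in the middle is filled from its two neighbors. The neighbor values (10 and 13) are inferred from the expected 11.5, not stated in the row; metrics 1 and 2 presumably use MAX/LATEST-style aggregation, taking 13.0 directly. At the left edge there is no earlier neighbor, so the second test instead sees NO_VALID_EXTRAPOLATION with zero values.

public class AdjacentAvgSketch {
    public static void main(String[] args) {
        double before = 10.0; // value in the window preceding the gap (assumed)
        double after = 13.0;  // value in the window following the gap (assumed)
        System.out.println((before + after) / 2); // 11.5, the AVG_ADJACENT estimate
    }
}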
@Test public void testIsValid() { RawMetricValues rawValues = new RawMetricValues(NUM_WINDOWS_TO_KEEP, MIN_SAMPLES_PER_WINDOW, NUM_RAW_METRICS); rawValues.updateOldestWindowIndex(0); MetricSample<String, IntegerEntity> m = getMetricSample(10, 10, 10); for (int i = 0; i < NUM_WINDOWS_TO_KEEP; i++) { for (int j = 0; j < MIN_SAMPLES_PER_WINDOW - 1; j++) { addSample(rawValues, m, i); } } assertTrue(rawValues.isValid(5)); assertFalse(rawValues.isValid(4)); addSample(rawValues, m, 0); assertTrue(rawValues.isValid(4)); }
public synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation) { int currentArrayIndex = arrayIndex(currentWindowIndex()); int numValidIndicesAdjustment = _validity.get(currentArrayIndex) ? 1 : 0; boolean allIndicesValid = _validity.cardinality() - numValidIndicesAdjustment == _counts.length - 1; return allIndicesValid && numWindowsWithExtrapolation() <= maxAllowedWindowsWithExtrapolation; }
RawMetricValues extends WindowIndexedArrays { public synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation) { int currentArrayIndex = arrayIndex(currentWindowIndex()); int numValidIndicesAdjustment = _validity.get(currentArrayIndex) ? 1 : 0; boolean allIndicesValid = _validity.cardinality() - numValidIndicesAdjustment == _counts.length - 1; return allIndicesValid && numWindowsWithExtrapolation() <= maxAllowedWindowsWithExtrapolation; } }
RawMetricValues extends WindowIndexedArrays { public synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation) { int currentArrayIndex = arrayIndex(currentWindowIndex()); int numValidIndicesAdjustment = _validity.get(currentArrayIndex) ? 1 : 0; boolean allIndicesValid = _validity.cardinality() - numValidIndicesAdjustment == _counts.length - 1; return allIndicesValid && numWindowsWithExtrapolation() <= maxAllowedWindowsWithExtrapolation; } RawMetricValues(int numWindowsToKeep, byte minSamplesPerWindow, int numMetricTypesInSample); }
RawMetricValues extends WindowIndexedArrays { public synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation) { int currentArrayIndex = arrayIndex(currentWindowIndex()); int numValidIndicesAdjustment = _validity.get(currentArrayIndex) ? 1 : 0; boolean allIndicesValid = _validity.cardinality() - numValidIndicesAdjustment == _counts.length - 1; return allIndicesValid && numWindowsWithExtrapolation() <= maxAllowedWindowsWithExtrapolation; } RawMetricValues(int numWindowsToKeep, byte minSamplesPerWindow, int numMetricTypesInSample); synchronized void addSample(MetricSample<?, ?> sample, long windowIndex, MetricDef metricDef); synchronized void updateOldestWindowIndex(long newOldestWindowIndex); synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation); synchronized int numWindowsWithExtrapolation(); synchronized boolean isValidAtWindowIndex(long windowIndex); synchronized boolean isExtrapolatedAtWindowIndex(long windowIndex); synchronized byte sampleCountsAtWindowIndex(long windowIndex); synchronized void sanityCheckWindowIndex(long windowIndex); synchronized void sanityCheckWindowRangeReset(long startingWindowIndex, int numWindowIndicesToReset); synchronized int resetWindowIndices(long startingWindowIndex, int numWindowIndicesToReset); synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef); synchronized ValuesAndExtrapolations peekCurrentWindow(long currentWindowIndex, MetricDef metricDef); synchronized int numSamples(); }
RawMetricValues extends WindowIndexedArrays { public synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation) { int currentArrayIndex = arrayIndex(currentWindowIndex()); int numValidIndicesAdjustment = _validity.get(currentArrayIndex) ? 1 : 0; boolean allIndicesValid = _validity.cardinality() - numValidIndicesAdjustment == _counts.length - 1; return allIndicesValid && numWindowsWithExtrapolation() <= maxAllowedWindowsWithExtrapolation; } RawMetricValues(int numWindowsToKeep, byte minSamplesPerWindow, int numMetricTypesInSample); synchronized void addSample(MetricSample<?, ?> sample, long windowIndex, MetricDef metricDef); synchronized void updateOldestWindowIndex(long newOldestWindowIndex); synchronized boolean isValid(int maxAllowedWindowsWithExtrapolation); synchronized int numWindowsWithExtrapolation(); synchronized boolean isValidAtWindowIndex(long windowIndex); synchronized boolean isExtrapolatedAtWindowIndex(long windowIndex); synchronized byte sampleCountsAtWindowIndex(long windowIndex); synchronized void sanityCheckWindowIndex(long windowIndex); synchronized void sanityCheckWindowRangeReset(long startingWindowIndex, int numWindowIndicesToReset); synchronized int resetWindowIndices(long startingWindowIndex, int numWindowIndicesToReset); synchronized ValuesAndExtrapolations aggregate(SortedSet<Long> windowIndices, MetricDef metricDef); synchronized ValuesAndExtrapolations peekCurrentWindow(long currentWindowIndex, MetricDef metricDef); synchronized int numSamples(); }
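A standalone sketch of the BitSet bookkeeping in isValid(): the still-filling current window is excluded from the count, so every one of the remaining windows must be marked valid before extrapolation limits are even checked. The window count below is arbitrary.

import java.util.BitSet;

public class ValiditySketch {
    public static void main(String[] args) {
        int numWindows = 5;
        BitSet validity = new BitSet(numWindows);
        validity.set(0, numWindows - 1);          // every window except the current one
        int currentArrayIndex = numWindows - 1;   // the in-progress window
        int adjustment = validity.get(currentArrayIndex) ? 1 : 0;
        boolean allOthersValid = validity.cardinality() - adjustment == numWindows - 1;
        System.out.println(allOthersValid);       // true
    }
}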
@Test public void testSanityCheckDryRun() throws InterruptedException, ExecutionException, TimeoutException { KafkaCruiseControlConfig config = EasyMock.mock(KafkaCruiseControlConfig.class); Time time = EasyMock.mock(Time.class); AnomalyDetectorManager anomalyDetectorManager = EasyMock.mock(AnomalyDetectorManager.class); Executor executor = EasyMock.strictMock(Executor.class); LoadMonitor loadMonitor = EasyMock.mock(LoadMonitor.class); ExecutorService goalOptimizerExecutor = EasyMock.mock(ExecutorService.class); GoalOptimizer goalOptimizer = EasyMock.mock(GoalOptimizer.class); EasyMock.expect(executor.hasOngoingExecution()).andReturn(true).times(2); EasyMock.expect(executor.hasOngoingExecution()).andReturn(false).once(); EasyMock.expect(executor.hasOngoingPartitionReassignments()).andReturn(true); EasyMock.expect(executor.hasOngoingExecution()).andReturn(false).once(); EasyMock.expect(executor.hasOngoingPartitionReassignments()).andReturn(false); EasyMock.expect(executor.hasOngoingLeaderElection()).andReturn(true); EasyMock.expect(executor.hasOngoingExecution()).andReturn(false).once(); EasyMock.expect(executor.hasOngoingPartitionReassignments()).andReturn(false); EasyMock.expect(executor.hasOngoingLeaderElection()).andReturn(false); EasyMock.replay(config, time, anomalyDetectorManager, executor, loadMonitor, goalOptimizerExecutor, goalOptimizer); KafkaCruiseControl kafkaCruiseControl = new KafkaCruiseControl(config, time, anomalyDetectorManager, executor, loadMonitor, goalOptimizerExecutor, goalOptimizer); kafkaCruiseControl.sanityCheckDryRun(true, false); kafkaCruiseControl.sanityCheckDryRun(true, true); kafkaCruiseControl.sanityCheckDryRun(false, true); assertThrows(IllegalStateException.class, () -> kafkaCruiseControl.sanityCheckDryRun(false, false)); assertThrows(IllegalStateException.class, () -> kafkaCruiseControl.sanityCheckDryRun(false, false)); assertThrows(IllegalStateException.class, () -> kafkaCruiseControl.sanityCheckDryRun(false, false)); kafkaCruiseControl.sanityCheckDryRun(false, false); EasyMock.verify(config, time, anomalyDetectorManager, executor, loadMonitor, goalOptimizerExecutor, goalOptimizer); }
public void sanityCheckDryRun(boolean dryRun, boolean stopOngoingExecution) { if (dryRun) { return; } if (hasOngoingExecution()) { if (!stopOngoingExecution) { throw new IllegalStateException(String.format("Cannot start a new execution while there is an ongoing execution. " + "Please use %s=true to stop ongoing execution and start a new one.", STOP_ONGOING_EXECUTION_PARAM)); } } else { if (_executor.hasOngoingPartitionReassignments()) { throw new IllegalStateException("Cannot execute new proposals while there are ongoing partition reassignments initiated by " + "external agent."); } else if (_executor.hasOngoingLeaderElection()) { throw new IllegalStateException("Cannot execute new proposals while there are ongoing leadership reassignments initiated by " + "external agent."); } } }
KafkaCruiseControl { public void sanityCheckDryRun(boolean dryRun, boolean stopOngoingExecution) { if (dryRun) { return; } if (hasOngoingExecution()) { if (!stopOngoingExecution) { throw new IllegalStateException(String.format("Cannot start a new execution while there is an ongoing execution. " + "Please use %s=true to stop ongoing execution and start a new one.", STOP_ONGOING_EXECUTION_PARAM)); } } else { if (_executor.hasOngoingPartitionReassignments()) { throw new IllegalStateException("Cannot execute new proposals while there are ongoing partition reassignments initiated by " + "external agent."); } else if (_executor.hasOngoingLeaderElection()) { throw new IllegalStateException("Cannot execute new proposals while there are ongoing leadership reassignments initiated by " + "external agent."); } } } }
KafkaCruiseControl { public void sanityCheckDryRun(boolean dryRun, boolean stopOngoingExecution) { if (dryRun) { return; } if (hasOngoingExecution()) { if (!stopOngoingExecution) { throw new IllegalStateException(String.format("Cannot start a new execution while there is an ongoing execution. " + "Please use %s=true to stop ongoing execution and start a new one.", STOP_ONGOING_EXECUTION_PARAM)); } } else { if (_executor.hasOngoingPartitionReassignments()) { throw new IllegalStateException("Cannot execute new proposals while there are ongoing partition reassignments initiated by " + "external agent."); } else if (_executor.hasOngoingLeaderElection()) { throw new IllegalStateException("Cannot execute new proposals while there are ongoing leadership reassignments initiated by " + "external agent."); } } } KafkaCruiseControl(KafkaCruiseControlConfig config, MetricRegistry dropwizardMetricRegistry); KafkaCruiseControl(KafkaCruiseControlConfig config, Time time, AnomalyDetectorManager anomalyDetectorManager, Executor executor, LoadMonitor loadMonitor, ExecutorService goalOptimizerExecutor, GoalOptimizer goalOptimizer); }
KafkaCruiseControl { public void sanityCheckDryRun(boolean dryRun, boolean stopOngoingExecution) { if (dryRun) { return; } if (hasOngoingExecution()) { if (!stopOngoingExecution) { throw new IllegalStateException(String.format("Cannot start a new execution while there is an ongoing execution. " + "Please use %s=true to stop ongoing execution and start a new one.", STOP_ONGOING_EXECUTION_PARAM)); } } else { if (_executor.hasOngoingPartitionReassignments()) { throw new IllegalStateException("Cannot execute new proposals while there are ongoing partition reassignments initiated by " + "external agent."); } else if (_executor.hasOngoingLeaderElection()) { throw new IllegalStateException("Cannot execute new proposals while there are ongoing leadership reassignments initiated by " + "external agent."); } } } KafkaCruiseControl(KafkaCruiseControlConfig config, MetricRegistry dropwizardMetricRegistry); KafkaCruiseControl(KafkaCruiseControlConfig config, Time time, AnomalyDetectorManager anomalyDetectorManager, Executor executor, LoadMonitor loadMonitor, ExecutorService goalOptimizerExecutor, GoalOptimizer goalOptimizer); LoadMonitor loadMonitor(); MetadataClient.ClusterAndGeneration refreshClusterAndGeneration(); LoadMonitorTaskRunner.LoadMonitorTaskRunnerState getLoadMonitorTaskRunnerState(); LoadMonitor.AutoCloseableSemaphore acquireForModelGeneration(OperationProgress operationProgress); long timeMs(); void sleep(long ms); void startUp(); void shutdown(); void setUserTaskManagerInExecutor(UserTaskManager userTaskManager); void sanityCheckDryRun(boolean dryRun, boolean stopOngoingExecution); boolean hasOngoingExecution(); boolean modifyOngoingExecution(boolean modify); BrokerStats cachedBrokerLoadStats(boolean allowCapacityEstimation); ClusterModel clusterModel(ModelCompletenessRequirements requirements, boolean allowCapacityEstimation, OperationProgress operationProgress); ClusterModel clusterModel(long from, long to, ModelCompletenessRequirements requirements, boolean populateReplicaPlacementInfo, boolean allowCapacityEstimation, OperationProgress operationProgress); ClusterModel clusterCapacity(); void bootstrap(Long startMs, Long endMs, boolean clearMetrics); void pauseMetricSampling(String reason); void train(Long startMs, Long endMs); boolean setSelfHealingFor(AnomalyType anomalyType, boolean isSelfHealingEnabled); boolean setConcurrencyAdjusterFor(ConcurrencyType concurrencyType, boolean isConcurrencyAdjusterEnabled); boolean dropRecentBrokers(Set<Integer> brokersToDrop, boolean isRemoved); void addRecentBrokersPermanently(Set<Integer> brokersToAdd, boolean isRemoved); Set<Integer> recentBrokers(boolean isRemoved); void setRequestedExecutionProgressCheckIntervalMs(Long requestedExecutionProgressCheckIntervalMs); void setRequestedInterBrokerPartitionMovementConcurrency(Integer requestedInterBrokerPartitionMovementConcurrency); void setRequestedIntraBrokerPartitionMovementConcurrency(Integer requestedIntraBrokerPartitionMovementConcurrency); void setRequestedLeadershipMovementConcurrency(Integer requestedLeadershipMovementConcurrency); void resumeMetricSampling(String reason); OptimizerResult getProposals(OperationProgress operationProgress, boolean allowCapacityEstimation); boolean ignoreProposalCache(List<String> goals, ModelCompletenessRequirements requirements, Pattern excludedTopics, boolean excludeBrokers, boolean ignoreProposalCache, boolean isTriggeredByGoalViolation, Set<Integer> requestedDestinationBrokerIds, boolean isRebalanceDiskMode); synchronized OptimizerResult optimizations(ClusterModel clusterModel, List<Goal> goalsByPriority, OperationProgress operationProgress, Map<TopicPartition, List<ReplicaPlacementInfo>> initReplicaDistribution, OptimizationOptions optimizationOptions); Set<String> excludedTopics(ClusterModel clusterModel, Pattern requestedExcludedTopics); KafkaCruiseControlConfig config(); void executeProposals(Set<ExecutionProposal> proposals, Set<Integer> unthrottledBrokers, boolean isKafkaAssignerMode, Integer concurrentInterBrokerPartitionMovements, Integer concurrentIntraBrokerPartitionMovements, Integer concurrentLeaderMovements, Long executionProgressCheckIntervalMs, ReplicaMovementStrategy replicaMovementStrategy, Long replicationThrottle, boolean isTriggeredByUserRequest, String uuid, boolean skipAutoRefreshingConcurrency); void executeRemoval(Set<ExecutionProposal> proposals, boolean throttleDecommissionedBroker, Set<Integer> removedBrokers, boolean isKafkaAssignerMode, Integer concurrentInterBrokerPartitionMovements, Integer concurrentLeaderMovements, Long executionProgressCheckIntervalMs, ReplicaMovementStrategy replicaMovementStrategy, Long replicationThrottle, boolean isTriggeredByUserRequest, String uuid); void executeDemotion(Set<ExecutionProposal> proposals, Set<Integer> demotedBrokers, Integer concurrentLeaderMovements, int brokerCount, Long executionProgressCheckIntervalMs, ReplicaMovementStrategy replicaMovementStrategy, Long replicationThrottle, boolean isTriggeredByUserRequest, String uuid); void userTriggeredStopExecution(boolean forceExecutionStop); void setGeneratingProposalsForExecution(String uuid, Supplier<String> reasonSupplier, boolean isTriggeredByUserRequest); synchronized void failGeneratingProposalsForExecution(String uuid); long executionProgressCheckIntervalMs(); ExecutorState.State executionState(); ExecutorState executorState(); LoadMonitorState monitorState(Cluster cluster); AnalyzerState analyzerState(Cluster cluster); AnomalyDetectorState anomalyDetectorState(); Cluster kafkaCluster(); TopicConfigProvider topicConfigProvider(); static String cruiseControlVersion(); static String cruiseControlCommitId(); ModelCompletenessRequirements modelCompletenessRequirements(Collection<Goal> goals); boolean meetCompletenessRequirements(List<String> goals); void sanityCheckBrokerPresence(Set<Integer> brokerIds); }
KafkaCruiseControl { public void sanityCheckDryRun(boolean dryRun, boolean stopOngoingExecution) { if (dryRun) { return; } if (hasOngoingExecution()) { if (!stopOngoingExecution) { throw new IllegalStateException(String.format("Cannot start a new execution while there is an ongoing execution. " + "Please use %s=true to stop ongoing execution and start a new one.", STOP_ONGOING_EXECUTION_PARAM)); } } else { if (_executor.hasOngoingPartitionReassignments()) { throw new IllegalStateException("Cannot execute new proposals while there are ongoing partition reassignments initiated by " + "external agent."); } else if (_executor.hasOngoingLeaderElection()) { throw new IllegalStateException("Cannot execute new proposals while there are ongoing leadership reassignments initiated by " + "external agent."); } } } KafkaCruiseControl(KafkaCruiseControlConfig config, MetricRegistry dropwizardMetricRegistry); KafkaCruiseControl(KafkaCruiseControlConfig config, Time time, AnomalyDetectorManager anomalyDetectorManager, Executor executor, LoadMonitor loadMonitor, ExecutorService goalOptimizerExecutor, GoalOptimizer goalOptimizer); LoadMonitor loadMonitor(); MetadataClient.ClusterAndGeneration refreshClusterAndGeneration(); LoadMonitorTaskRunner.LoadMonitorTaskRunnerState getLoadMonitorTaskRunnerState(); LoadMonitor.AutoCloseableSemaphore acquireForModelGeneration(OperationProgress operationProgress); long timeMs(); void sleep(long ms); void startUp(); void shutdown(); void setUserTaskManagerInExecutor(UserTaskManager userTaskManager); void sanityCheckDryRun(boolean dryRun, boolean stopOngoingExecution); boolean hasOngoingExecution(); boolean modifyOngoingExecution(boolean modify); BrokerStats cachedBrokerLoadStats(boolean allowCapacityEstimation); ClusterModel clusterModel(ModelCompletenessRequirements requirements, boolean allowCapacityEstimation, OperationProgress operationProgress); ClusterModel clusterModel(long from, long to, ModelCompletenessRequirements requirements, boolean populateReplicaPlacementInfo, boolean allowCapacityEstimation, OperationProgress operationProgress); ClusterModel clusterCapacity(); void bootstrap(Long startMs, Long endMs, boolean clearMetrics); void pauseMetricSampling(String reason); void train(Long startMs, Long endMs); boolean setSelfHealingFor(AnomalyType anomalyType, boolean isSelfHealingEnabled); boolean setConcurrencyAdjusterFor(ConcurrencyType concurrencyType, boolean isConcurrencyAdjusterEnabled); boolean dropRecentBrokers(Set<Integer> brokersToDrop, boolean isRemoved); void addRecentBrokersPermanently(Set<Integer> brokersToAdd, boolean isRemoved); Set<Integer> recentBrokers(boolean isRemoved); void setRequestedExecutionProgressCheckIntervalMs(Long requestedExecutionProgressCheckIntervalMs); void setRequestedInterBrokerPartitionMovementConcurrency(Integer requestedInterBrokerPartitionMovementConcurrency); void setRequestedIntraBrokerPartitionMovementConcurrency(Integer requestedIntraBrokerPartitionMovementConcurrency); void setRequestedLeadershipMovementConcurrency(Integer requestedLeadershipMovementConcurrency); void resumeMetricSampling(String reason); OptimizerResult getProposals(OperationProgress operationProgress, boolean allowCapacityEstimation); boolean ignoreProposalCache(List<String> goals, ModelCompletenessRequirements requirements, Pattern excludedTopics, boolean excludeBrokers, boolean ignoreProposalCache, boolean isTriggeredByGoalViolation, Set<Integer> requestedDestinationBrokerIds, boolean isRebalanceDiskMode); synchronized OptimizerResult optimizations(ClusterModel clusterModel, List<Goal> goalsByPriority, OperationProgress operationProgress, Map<TopicPartition, List<ReplicaPlacementInfo>> initReplicaDistribution, OptimizationOptions optimizationOptions); Set<String> excludedTopics(ClusterModel clusterModel, Pattern requestedExcludedTopics); KafkaCruiseControlConfig config(); void executeProposals(Set<ExecutionProposal> proposals, Set<Integer> unthrottledBrokers, boolean isKafkaAssignerMode, Integer concurrentInterBrokerPartitionMovements, Integer concurrentIntraBrokerPartitionMovements, Integer concurrentLeaderMovements, Long executionProgressCheckIntervalMs, ReplicaMovementStrategy replicaMovementStrategy, Long replicationThrottle, boolean isTriggeredByUserRequest, String uuid, boolean skipAutoRefreshingConcurrency); void executeRemoval(Set<ExecutionProposal> proposals, boolean throttleDecommissionedBroker, Set<Integer> removedBrokers, boolean isKafkaAssignerMode, Integer concurrentInterBrokerPartitionMovements, Integer concurrentLeaderMovements, Long executionProgressCheckIntervalMs, ReplicaMovementStrategy replicaMovementStrategy, Long replicationThrottle, boolean isTriggeredByUserRequest, String uuid); void executeDemotion(Set<ExecutionProposal> proposals, Set<Integer> demotedBrokers, Integer concurrentLeaderMovements, int brokerCount, Long executionProgressCheckIntervalMs, ReplicaMovementStrategy replicaMovementStrategy, Long replicationThrottle, boolean isTriggeredByUserRequest, String uuid); void userTriggeredStopExecution(boolean forceExecutionStop); void setGeneratingProposalsForExecution(String uuid, Supplier<String> reasonSupplier, boolean isTriggeredByUserRequest); synchronized void failGeneratingProposalsForExecution(String uuid); long executionProgressCheckIntervalMs(); ExecutorState.State executionState(); ExecutorState executorState(); LoadMonitorState monitorState(Cluster cluster); AnalyzerState analyzerState(Cluster cluster); AnomalyDetectorState anomalyDetectorState(); Cluster kafkaCluster(); TopicConfigProvider topicConfigProvider(); static String cruiseControlVersion(); static String cruiseControlCommitId(); ModelCompletenessRequirements modelCompletenessRequirements(Collection<Goal> goals); boolean meetCompletenessRequirements(List<String> goals); void sanityCheckBrokerPresence(Set<Integer> brokerIds); }
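sanityCheckDryRun encodes a small decision table, which is exactly what the mock sequence in the test exercises: dry runs always pass; an ongoing Cruise Control execution requires an explicit opt-in to stop it; and even with no ongoing execution, externally initiated reassignments or leader elections block. A standalone restatement of that table with the Executor queries stubbed as booleans (all names invented):

public class DryRunGuardSketch {
    // `ongoingExecution`, `externalReassignment` and `externalLeaderElection`
    // stand in for the Executor queries used by KafkaCruiseControl.
    static void sanityCheck(boolean dryRun, boolean stopOngoingExecution,
                            boolean ongoingExecution, boolean externalReassignment,
                            boolean externalLeaderElection) {
        if (dryRun) {
            return; // a dry run never mutates the cluster, so nothing can conflict
        }
        if (ongoingExecution) {
            if (!stopOngoingExecution) {
                throw new IllegalStateException("ongoing execution; opt into stopping it first");
            }
        } else if (externalReassignment) {
            throw new IllegalStateException("externally initiated partition reassignment in progress");
        } else if (externalLeaderElection) {
            throw new IllegalStateException("externally initiated leader election in progress");
        }
    }
}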
@Test public void testBrokerDiesBeforeMovingPartition() throws Exception { KafkaZkClient kafkaZkClient = KafkaCruiseControlUtils.createKafkaZkClient(zookeeper().connectionString(), "ExecutorTestMetricGroup", "BrokerDiesBeforeMovingPartition", false); try { Map<String, TopicDescription> topicDescriptions = createTopics((int) PRODUCE_SIZE_IN_BYTES); int initialLeader0 = topicDescriptions.get(TOPIC0).partitions().get(0).leader().id(); int initialLeader1 = topicDescriptions.get(TOPIC1).partitions().get(0).leader().id(); _brokers.get(initialLeader0 == 0 ? 1 : 0).shutdown(); ExecutionProposal proposal0 = new ExecutionProposal(TP0, PRODUCE_SIZE_IN_BYTES, new ReplicaPlacementInfo(initialLeader0), Collections.singletonList(new ReplicaPlacementInfo(initialLeader0)), Collections.singletonList(new ReplicaPlacementInfo(initialLeader0 == 0 ? 1 : 0))); ExecutionProposal proposal1 = new ExecutionProposal(TP1, 0, new ReplicaPlacementInfo(initialLeader1), Arrays.asList(new ReplicaPlacementInfo(initialLeader1), new ReplicaPlacementInfo(initialLeader1 == 0 ? 1 : 0)), Arrays.asList(new ReplicaPlacementInfo(initialLeader1 == 0 ? 1 : 0), new ReplicaPlacementInfo(initialLeader1))); Collection<ExecutionProposal> proposalsToExecute = Arrays.asList(proposal0, proposal1); executeAndVerifyProposals(kafkaZkClient, proposalsToExecute, Collections.emptyList(), true, null, false, false); assertEquals(Collections.singletonList(initialLeader0 == 0 ? 1 : 0), ExecutorUtils.newAssignmentForPartition(kafkaZkClient, TP0)); assertEquals(initialLeader0, kafkaZkClient.getLeaderForPartition(TP0).get()); assertEquals(initialLeader0, kafkaZkClient.getLeaderForPartition(TP1).get()); } finally { KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(kafkaZkClient); } }
public synchronized void shutdown() { LOG.info("Shutting down executor."); if (_hasOngoingExecution) { LOG.warn("Shutdown executor may take long because execution is still in progress."); } _proposalExecutor.shutdown(); try { _proposalExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for anomaly detector to shutdown."); } _metadataClient.close(); KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(_kafkaZkClient); KafkaCruiseControlUtils.closeAdminClientWithTimeout(_adminClient); _executionHistoryScannerExecutor.shutdownNow(); _concurrencyAdjusterExecutor.shutdownNow(); LOG.info("Executor shutdown completed."); }
Executor { public synchronized void shutdown() { LOG.info("Shutting down executor."); if (_hasOngoingExecution) { LOG.warn("Shutdown executor may take long because execution is still in progress."); } _proposalExecutor.shutdown(); try { _proposalExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for anomaly detector to shutdown."); } _metadataClient.close(); KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(_kafkaZkClient); KafkaCruiseControlUtils.closeAdminClientWithTimeout(_adminClient); _executionHistoryScannerExecutor.shutdownNow(); _concurrencyAdjusterExecutor.shutdownNow(); LOG.info("Executor shutdown completed."); } }
Executor { public synchronized void shutdown() { LOG.info("Shutting down executor."); if (_hasOngoingExecution) { LOG.warn("Shutdown executor may take long because execution is still in progress."); } _proposalExecutor.shutdown(); try { _proposalExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for anomaly detector to shutdown."); } _metadataClient.close(); KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(_kafkaZkClient); KafkaCruiseControlUtils.closeAdminClientWithTimeout(_adminClient); _executionHistoryScannerExecutor.shutdownNow(); _concurrencyAdjusterExecutor.shutdownNow(); LOG.info("Executor shutdown completed."); } Executor(KafkaCruiseControlConfig config, Time time, MetricRegistry dropwizardMetricRegistry, AnomalyDetectorManager anomalyDetectorManager); Executor(KafkaCruiseControlConfig config, Time time, MetricRegistry dropwizardMetricRegistry, MetadataClient metadataClient, ExecutorNotifier executorNotifier, AnomalyDetectorManager anomalyDetectorManager); }
Executor { public synchronized void shutdown() { LOG.info("Shutting down executor."); if (_hasOngoingExecution) { LOG.warn("Shutdown executor may take long because execution is still in progress."); } _proposalExecutor.shutdown(); try { _proposalExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for anomaly detector to shutdown."); } _metadataClient.close(); KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(_kafkaZkClient); KafkaCruiseControlUtils.closeAdminClientWithTimeout(_adminClient); _executionHistoryScannerExecutor.shutdownNow(); _concurrencyAdjusterExecutor.shutdownNow(); LOG.info("Executor shutdown completed."); } Executor(KafkaCruiseControlConfig config, Time time, MetricRegistry dropwizardMetricRegistry, AnomalyDetectorManager anomalyDetectorManager); Executor(KafkaCruiseControlConfig config, Time time, MetricRegistry dropwizardMetricRegistry, MetadataClient metadataClient, ExecutorNotifier executorNotifier, AnomalyDetectorManager anomalyDetectorManager); Set<ExecutionTask> inExecutionTasks(); synchronized void setRequestedExecutionProgressCheckIntervalMs(Long requestedExecutionProgressCheckIntervalMs); long executionProgressCheckIntervalMs(); Set<Integer> recentlyDemotedBrokers(); Set<Integer> recentlyRemovedBrokers(); boolean dropRecentlyRemovedBrokers(Set<Integer> brokersToDrop); boolean dropRecentlyDemotedBrokers(Set<Integer> brokersToDrop); void addRecentlyRemovedBrokers(Set<Integer> brokersToAdd); void addRecentlyDemotedBrokers(Set<Integer> brokersToAdd); ExecutorState state(); boolean setConcurrencyAdjusterFor(ConcurrencyType concurrencyType, boolean isConcurrencyAdjusterEnabled); synchronized void executeProposals(Collection<ExecutionProposal> proposals, Set<Integer> unthrottledBrokers, Set<Integer> removedBrokers, LoadMonitor loadMonitor, Integer requestedInterBrokerPartitionMovementConcurrency, Integer requestedIntraBrokerPartitionMovementConcurrency, Integer requestedLeadershipMovementConcurrency, Long requestedExecutionProgressCheckIntervalMs, ReplicaMovementStrategy replicaMovementStrategy, Long replicationThrottle, boolean isTriggeredByUserRequest, String uuid, boolean isKafkaAssignerMode, boolean skipAutoRefreshingConcurrency); synchronized void executeDemoteProposals(Collection<ExecutionProposal> proposals, Collection<Integer> demotedBrokers, LoadMonitor loadMonitor, Integer concurrentSwaps, Integer requestedLeadershipMovementConcurrency, Long requestedExecutionProgressCheckIntervalMs, ReplicaMovementStrategy replicaMovementStrategy, Long replicationThrottle, boolean isTriggeredByUserRequest, String uuid); void setRequestedInterBrokerPartitionMovementConcurrency(Integer requestedInterBrokerPartitionMovementConcurrency); void setRequestedIntraBrokerPartitionMovementConcurrency(Integer requestedIntraBrokerPartitionMovementConcurrency); void setRequestedLeadershipMovementConcurrency(Integer requestedLeadershipMovementConcurrency); void setUserTaskManager(UserTaskManager userTaskManager); synchronized void setGeneratingProposalsForExecution(String uuid, Supplier<String> reasonSupplier, boolean isTriggeredByUserRequest); synchronized void failGeneratingProposalsForExecution(String uuid); synchronized void userTriggeredStopExecution(boolean forceExecutionStop); synchronized void shutdown(); boolean modifyOngoingExecution(boolean modify); boolean hasOngoingExecution(); boolean hasOngoingPartitionReassignments(); boolean hasOngoingLeaderElection(); }
Executor { public synchronized void shutdown() { LOG.info("Shutting down executor."); if (_hasOngoingExecution) { LOG.warn("Shutdown executor may take long because execution is still in progress."); } _proposalExecutor.shutdown(); try { _proposalExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for anomaly detector to shutdown."); } _metadataClient.close(); KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(_kafkaZkClient); KafkaCruiseControlUtils.closeAdminClientWithTimeout(_adminClient); _executionHistoryScannerExecutor.shutdownNow(); _concurrencyAdjusterExecutor.shutdownNow(); LOG.info("Executor shutdown completed."); } Executor(KafkaCruiseControlConfig config, Time time, MetricRegistry dropwizardMetricRegistry, AnomalyDetectorManager anomalyDetectorManager); Executor(KafkaCruiseControlConfig config, Time time, MetricRegistry dropwizardMetricRegistry, MetadataClient metadataClient, ExecutorNotifier executorNotifier, AnomalyDetectorManager anomalyDetectorManager); Set<ExecutionTask> inExecutionTasks(); synchronized void setRequestedExecutionProgressCheckIntervalMs(Long requestedExecutionProgressCheckIntervalMs); long executionProgressCheckIntervalMs(); Set<Integer> recentlyDemotedBrokers(); Set<Integer> recentlyRemovedBrokers(); boolean dropRecentlyRemovedBrokers(Set<Integer> brokersToDrop); boolean dropRecentlyDemotedBrokers(Set<Integer> brokersToDrop); void addRecentlyRemovedBrokers(Set<Integer> brokersToAdd); void addRecentlyDemotedBrokers(Set<Integer> brokersToAdd); ExecutorState state(); boolean setConcurrencyAdjusterFor(ConcurrencyType concurrencyType, boolean isConcurrencyAdjusterEnabled); synchronized void executeProposals(Collection<ExecutionProposal> proposals, Set<Integer> unthrottledBrokers, Set<Integer> removedBrokers, LoadMonitor loadMonitor, Integer requestedInterBrokerPartitionMovementConcurrency, Integer requestedIntraBrokerPartitionMovementConcurrency, Integer requestedLeadershipMovementConcurrency, Long requestedExecutionProgressCheckIntervalMs, ReplicaMovementStrategy replicaMovementStrategy, Long replicationThrottle, boolean isTriggeredByUserRequest, String uuid, boolean isKafkaAssignerMode, boolean skipAutoRefreshingConcurrency); synchronized void executeDemoteProposals(Collection<ExecutionProposal> proposals, Collection<Integer> demotedBrokers, LoadMonitor loadMonitor, Integer concurrentSwaps, Integer requestedLeadershipMovementConcurrency, Long requestedExecutionProgressCheckIntervalMs, ReplicaMovementStrategy replicaMovementStrategy, Long replicationThrottle, boolean isTriggeredByUserRequest, String uuid); void setRequestedInterBrokerPartitionMovementConcurrency(Integer requestedInterBrokerPartitionMovementConcurrency); void setRequestedIntraBrokerPartitionMovementConcurrency(Integer requestedIntraBrokerPartitionMovementConcurrency); void setRequestedLeadershipMovementConcurrency(Integer requestedLeadershipMovementConcurrency); void setUserTaskManager(UserTaskManager userTaskManager); synchronized void setGeneratingProposalsForExecution(String uuid, Supplier<String> reasonSupplier, boolean isTriggeredByUserRequest); synchronized void failGeneratingProposalsForExecution(String uuid); synchronized void userTriggeredStopExecution(boolean forceExecutionStop); synchronized void shutdown(); boolean modifyOngoingExecution(boolean modify); boolean hasOngoingExecution(); boolean hasOngoingPartitionReassignments(); boolean hasOngoingLeaderElection(); final long _slowTaskAlertingBackoffTimeMs; }
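The shutdown above follows a common two-tier pattern: the executor that may be mid-execution is drained with shutdown() plus an unbounded awaitTermination(), while the purely periodic background executors are cut off with shutdownNow(). A self-contained sketch of that pattern (names invented):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownSketch {
    private final ExecutorService proposalExecutor = Executors.newSingleThreadExecutor();
    private final ExecutorService historyScanner = Executors.newSingleThreadScheduledExecutor();

    public synchronized void shutdown() {
        proposalExecutor.shutdown(); // stop accepting work, let the in-flight execution finish
        try {
            proposalExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt status for the caller
        }
        historyScanner.shutdownNow(); // periodic housekeeping can be abandoned immediately
    }
}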
@Test public void testRemoveReplicasFromConfigTest() { Set<String> replicas = new LinkedHashSet<>(); replicas.add("foo"); replicas.add("bar"); replicas.add("baz"); String throttleConfig = "foo,bar,qux,qaz,baz"; String result = ReplicationThrottleHelper.removeReplicasFromConfig(throttleConfig, replicas); assertEquals("qux,qaz", result); }
static String removeReplicasFromConfig(String throttleConfig, Set<String> replicas) { ArrayList<String> throttles = new ArrayList<>(Arrays.asList(throttleConfig.split(","))); throttles.removeIf(replicas::contains); return String.join(",", throttles); }
ReplicationThrottleHelper { static String removeReplicasFromConfig(String throttleConfig, Set<String> replicas) { ArrayList<String> throttles = new ArrayList<>(Arrays.asList(throttleConfig.split(","))); throttles.removeIf(replicas::contains); return String.join(",", throttles); } }
ReplicationThrottleHelper { static String removeReplicasFromConfig(String throttleConfig, Set<String> replicas) { ArrayList<String> throttles = new ArrayList<>(Arrays.asList(throttleConfig.split(","))); throttles.removeIf(replicas::contains); return String.join(",", throttles); } ReplicationThrottleHelper(KafkaZkClient kafkaZkClient, Long throttleRate); }
ReplicationThrottleHelper { static String removeReplicasFromConfig(String throttleConfig, Set<String> replicas) { ArrayList<String> throttles = new ArrayList<>(Arrays.asList(throttleConfig.split(","))); throttles.removeIf(replicas::contains); return String.join(",", throttles); } ReplicationThrottleHelper(KafkaZkClient kafkaZkClient, Long throttleRate); }
ReplicationThrottleHelper { static String removeReplicasFromConfig(String throttleConfig, Set<String> replicas) { ArrayList<String> throttles = new ArrayList<>(Arrays.asList(throttleConfig.split(","))); throttles.removeIf(replicas::contains); return String.join(",", throttles); } ReplicationThrottleHelper(KafkaZkClient kafkaZkClient, Long throttleRate); }
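Since the throttled-replica config is a flat comma-separated string, removal is a split, filter, join. A standalone illustration of the same transformation, reproducing the expectation from the test above:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;

public class ThrottleConfigSketch {
    public static void main(String[] args) {
        String throttleConfig = "foo,bar,qux,qaz,baz";
        Set<String> replicas = Set.of("foo", "bar", "baz");
        ArrayList<String> throttles = new ArrayList<>(Arrays.asList(throttleConfig.split(",")));
        throttles.removeIf(replicas::contains); // drop every entry we no longer throttle
        System.out.println(String.join(",", throttles)); // prints "qux,qaz"
    }
}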
@Test public void testAllWindows() { MetricSampleAggregator<String, IntegerEntity> aggregator = new MetricSampleAggregator<>(NUM_WINDOWS, WINDOW_MS, MIN_SAMPLES_PER_WINDOW, 0, _metricDef); assertTrue(aggregator.allWindows().isEmpty()); CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY1, 0, WINDOW_MS, _metricDef); List<Long> allStWindows = aggregator.allWindows(); assertEquals(NUM_WINDOWS + 1, allStWindows.size()); for (int i = 0; i < NUM_WINDOWS + 1; i++) { assertEquals((i + 1) * WINDOW_MS, allStWindows.get(i).longValue()); } }
public List<Long> allWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex); }
MetricSampleAggregator extends LongGenerationed { public List<Long> allWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex); } }
MetricSampleAggregator extends LongGenerationed { public List<Long> allWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex); } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); }
MetricSampleAggregator extends LongGenerationed { public List<Long> allWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex); } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
MetricSampleAggregator extends LongGenerationed { public List<Long> allWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex); } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
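allWindows also includes the still-open current window (compare the NUM_WINDOWS + 1 expectation here with the NUM_WINDOWS expectation for availableWindows in the aggregation test further below), and each value identifies a window by its end timestamp, which is why the test expects (i + 1) * WINDOW_MS. A standalone sketch of that derivation; windowList is a hypothetical stand-in for getWindowList(_oldestWindowIndex, _currentWindowIndex):

import java.util.ArrayList;
import java.util.List;

public class WindowListSketch {
    static List<Long> windowList(long oldestWindowIndex, long currentWindowIndex, long windowMs) {
        List<Long> windows = new ArrayList<>();
        for (long i = oldestWindowIndex; i <= currentWindowIndex; i++) {
            windows.add((i + 1) * windowMs); // a window is identified by its end time
        }
        return windows;
    }

    public static void main(String[] args) {
        // With windowMs = 1000 and indices 0..2 this prints [1000, 2000, 3000].
        System.out.println(windowList(0, 2, 1000));
    }
}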
@Test public void testIntraBrokerReplicaMovements() { ExecutionProposal p = new ExecutionProposal(TP, 0, _r0d0, Arrays.asList(_r0d0, _r1d1), Arrays.asList(_r0d1, _r1d1)); Assert.assertEquals(1, p.replicasToMoveBetweenDisksByBroker().size()); }
public Map<Integer, ReplicaPlacementInfo> replicasToMoveBetweenDisksByBroker() { return _replicasToMoveBetweenDisksByBroker; }
ExecutionProposal { public Map<Integer, ReplicaPlacementInfo> replicasToMoveBetweenDisksByBroker() { return _replicasToMoveBetweenDisksByBroker; } }
ExecutionProposal { public Map<Integer, ReplicaPlacementInfo> replicasToMoveBetweenDisksByBroker() { return _replicasToMoveBetweenDisksByBroker; } ExecutionProposal(TopicPartition tp, long partitionSize, ReplicaPlacementInfo oldLeader, List<ReplicaPlacementInfo> oldReplicas, List<ReplicaPlacementInfo> newReplicas); }
ExecutionProposal { public Map<Integer, ReplicaPlacementInfo> replicasToMoveBetweenDisksByBroker() { return _replicasToMoveBetweenDisksByBroker; } ExecutionProposal(TopicPartition tp, long partitionSize, ReplicaPlacementInfo oldLeader, List<ReplicaPlacementInfo> oldReplicas, List<ReplicaPlacementInfo> newReplicas); boolean isInterBrokerMovementCompleted(PartitionInfo partitionInfo); boolean isInterBrokerMovementAborted(PartitionInfo partitionInfo); String topic(); int partitionId(); TopicPartition topicPartition(); ReplicaPlacementInfo oldLeader(); ReplicaPlacementInfo newLeader(); List<ReplicaPlacementInfo> oldReplicas(); List<ReplicaPlacementInfo> newReplicas(); Set<ReplicaPlacementInfo> replicasToAdd(); Set<ReplicaPlacementInfo> replicasToRemove(); Map<Integer, ReplicaPlacementInfo> replicasToMoveBetweenDisksByBroker(); boolean hasReplicaAction(); boolean hasLeaderAction(); long interBrokerDataToMoveInMB(); long intraBrokerDataToMoveInMB(); long dataToMoveInMB(); Map<String, Object> getJsonStructure(); @Override String toString(); @Override boolean equals(Object other); @Override int hashCode(); }
ExecutionProposal { public Map<Integer, ReplicaPlacementInfo> replicasToMoveBetweenDisksByBroker() { return _replicasToMoveBetweenDisksByBroker; } ExecutionProposal(TopicPartition tp, long partitionSize, ReplicaPlacementInfo oldLeader, List<ReplicaPlacementInfo> oldReplicas, List<ReplicaPlacementInfo> newReplicas); boolean isInterBrokerMovementCompleted(PartitionInfo partitionInfo); boolean isInterBrokerMovementAborted(PartitionInfo partitionInfo); String topic(); int partitionId(); TopicPartition topicPartition(); ReplicaPlacementInfo oldLeader(); ReplicaPlacementInfo newLeader(); List<ReplicaPlacementInfo> oldReplicas(); List<ReplicaPlacementInfo> newReplicas(); Set<ReplicaPlacementInfo> replicasToAdd(); Set<ReplicaPlacementInfo> replicasToRemove(); Map<Integer, ReplicaPlacementInfo> replicasToMoveBetweenDisksByBroker(); boolean hasReplicaAction(); boolean hasLeaderAction(); long interBrokerDataToMoveInMB(); long intraBrokerDataToMoveInMB(); long dataToMoveInMB(); Map<String, Object> getJsonStructure(); @Override String toString(); @Override boolean equals(Object other); @Override int hashCode(); }
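replicasToMoveBetweenDisksByBroker is populated only for replicas whose broker id is unchanged between the old and new replica lists but whose log dir differs. A hedged usage fragment mirroring the test's fixtures (_r0d0 = broker 0 / disk 0, and so on); the two-argument ReplicaPlacementInfo(brokerId, logdir) constructor shape and the disk paths are assumptions for illustration:

// Fragment; assumes Cruise Control's model classes (ExecutionProposal,
// ReplicaPlacementInfo) and Kafka's TopicPartition are imported.
ReplicaPlacementInfo r0d0 = new ReplicaPlacementInfo(0, "/disk-0");
ReplicaPlacementInfo r0d1 = new ReplicaPlacementInfo(0, "/disk-1");
ReplicaPlacementInfo r1d1 = new ReplicaPlacementInfo(1, "/disk-1");
ExecutionProposal p = new ExecutionProposal(new TopicPartition("topic", 0), 0L, r0d0,
    Arrays.asList(r0d0, r1d1),   // old placement: broker 0 on disk-0, broker 1 on disk-1
    Arrays.asList(r0d1, r1d1));  // new placement: broker 0 moved to disk-1, broker 1 unchanged
// Exactly one broker (id 0) changed disks, hence a single-entry map.
System.out.println(p.replicasToMoveBetweenDisksByBroker().size()); // 1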
@Test public void testClear() { List<ExecutionProposal> proposals = new ArrayList<>(); proposals.add(_leaderMovement1); proposals.add(_partitionMovement1); ExecutionTaskPlanner planner = new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties())); Set<PartitionInfo> partitions = new HashSet<>(); partitions.add(generatePartitionInfo(_leaderMovement1, false)); partitions.add(generatePartitionInfo(_partitionMovement1, false)); Cluster expectedCluster = new Cluster(null, _expectedNodes, partitions, Collections.<String>emptySet(), Collections.<String>emptySet()); planner.addExecutionProposals(proposals, expectedCluster, null); assertEquals(2, planner.remainingLeadershipMovements().size()); assertEquals(2, planner.remainingInterBrokerReplicaMovements().size()); planner.clear(); assertEquals(0, planner.remainingLeadershipMovements().size()); assertEquals(0, planner.remainingInterBrokerReplicaMovements().size()); }
public void clear() { _intraPartMoveTaskByBrokerId.clear(); _interPartMoveTaskByBrokerId.clear(); _remainingLeadershipMovements.clear(); _remainingInterBrokerReplicaMovements.clear(); _remainingIntraBrokerReplicaMovements.clear(); }
ExecutionTaskPlanner { public void clear() { _intraPartMoveTaskByBrokerId.clear(); _interPartMoveTaskByBrokerId.clear(); _remainingLeadershipMovements.clear(); _remainingInterBrokerReplicaMovements.clear(); _remainingIntraBrokerReplicaMovements.clear(); } }
ExecutionTaskPlanner { public void clear() { _intraPartMoveTaskByBrokerId.clear(); _interPartMoveTaskByBrokerId.clear(); _remainingLeadershipMovements.clear(); _remainingInterBrokerReplicaMovements.clear(); _remainingIntraBrokerReplicaMovements.clear(); } ExecutionTaskPlanner(AdminClient adminClient, KafkaCruiseControlConfig config); }
ExecutionTaskPlanner { public void clear() { _intraPartMoveTaskByBrokerId.clear(); _interPartMoveTaskByBrokerId.clear(); _remainingLeadershipMovements.clear(); _remainingInterBrokerReplicaMovements.clear(); _remainingIntraBrokerReplicaMovements.clear(); } ExecutionTaskPlanner(AdminClient adminClient, KafkaCruiseControlConfig config); void addExecutionProposals(Collection<ExecutionProposal> proposals, Cluster cluster, ReplicaMovementStrategy replicaMovementStrategy); Set<ExecutionTask> remainingInterBrokerReplicaMovements(); Set<ExecutionTask> remainingIntraBrokerReplicaMovements(); Collection<ExecutionTask> remainingLeadershipMovements(); List<ExecutionTask> getLeadershipMovementTasks(int numTasks); List<ExecutionTask> getInterBrokerReplicaMovementTasks(Map<Integer, Integer> readyBrokers, Set<TopicPartition> inProgressPartitions); List<ExecutionTask> getIntraBrokerReplicaMovementTasks(Map<Integer, Integer> readyBrokers); void clear(); }
ExecutionTaskPlanner { public void clear() { _intraPartMoveTaskByBrokerId.clear(); _interPartMoveTaskByBrokerId.clear(); _remainingLeadershipMovements.clear(); _remainingInterBrokerReplicaMovements.clear(); _remainingIntraBrokerReplicaMovements.clear(); } ExecutionTaskPlanner(AdminClient adminClient, KafkaCruiseControlConfig config); void addExecutionProposals(Collection<ExecutionProposal> proposals, Cluster cluster, ReplicaMovementStrategy replicaMovementStrategy); Set<ExecutionTask> remainingInterBrokerReplicaMovements(); Set<ExecutionTask> remainingIntraBrokerReplicaMovements(); Collection<ExecutionTask> remainingLeadershipMovements(); List<ExecutionTask> getLeadershipMovementTasks(int numTasks); List<ExecutionTask> getInterBrokerReplicaMovementTasks(Map<Integer, Integer> readyBrokers, Set<TopicPartition> inProgressPartitions); List<ExecutionTask> getIntraBrokerReplicaMovementTasks(Map<Integer, Integer> readyBrokers); void clear(); }
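clear() resets every pending-work collection in one place so the planner can be reused for the next plan, which is what the test verifies. A minimal sketch of the same reset pattern with hypothetical task collections:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PlannerResetSketch {
    private final Map<Integer, Set<String>> moveTasksByBroker = new HashMap<>();
    private final Set<String> remainingLeadershipMovements = new HashSet<>();
    private final Set<String> remainingReplicaMovements = new HashSet<>();

    // Clearing (rather than reassigning) keeps any external references to the
    // collections valid while emptying the pending work.
    public void clear() {
        moveTasksByBroker.clear();
        remainingLeadershipMovements.clear();
        remainingReplicaMovements.clear();
    }
}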
@Test public void testScoreFunctionOnly() { Broker broker = generateBroker(NUM_REPLICAS); broker.trackSortedReplicas(SORT_NAME, null, null, SCORE_FUNC); SortedReplicas sr = broker.trackedSortedReplicas(SORT_NAME); double lastScore = Double.NEGATIVE_INFINITY; for (Replica r : sr.sortedReplicas(false)) { double score = SCORE_FUNC.apply(r); assertTrue(score >= lastScore); lastScore = score; } }
SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction); SortedReplicas(Broker broker, Disk disk, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunc, boolean initialize); }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction); SortedReplicas(Broker broker, Disk disk, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunc, boolean initialize); SortedSet<Replica> sortedReplicas(boolean clone); Set<Function<Replica, Boolean>> selectionFunctions(); List<Function<Replica, Integer>> priorityFunctions(); Function<Replica, Double> scoreFunction(); void add(Replica replica); }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction); SortedReplicas(Broker broker, Disk disk, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunc, boolean initialize); SortedSet<Replica> sortedReplicas(boolean clone); Set<Function<Replica, Boolean>> selectionFunctions(); List<Function<Replica, Integer>> priorityFunctions(); Function<Replica, Double> scoreFunction(); void add(Replica replica); }
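Across these SortedReplicas tests the ordering contract is: optional selection functions filter, priority functions order first, and the score function breaks ties in ascending order. A minimal sketch of that composed comparator, using an invented Replica record:

import java.util.Comparator;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Stream;

public class ReplicaOrderSketch {
    record Replica(int id, int priority, double score) {}

    public static void main(String[] args) {
        Function<Replica, Integer> priorityFunc = Replica::priority;
        Function<Replica, Double> scoreFunc = Replica::score;
        // Priority decides first; equal priorities fall back to ascending score.
        Comparator<Replica> order = Comparator.comparing(priorityFunc).thenComparing(scoreFunc);
        List<Replica> sorted = Stream.of(new Replica(1, 0, 5.0), new Replica(2, 0, 1.0), new Replica(3, 1, 0.0))
                                     .sorted(order).toList();
        System.out.println(sorted); // replica 2, then 1 (same priority, lower score first), then 3
    }
}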
@Test public void testPriorityFunction() { Broker broker = generateBroker(NUM_REPLICAS); new SortedReplicasHelper().addPriorityFunc(PRIORITY_FUNC) .setScoreFunc(SCORE_FUNC) .trackSortedReplicasFor(SORT_NAME, broker); SortedReplicas sr = broker.trackedSortedReplicas(SORT_NAME); assertEquals(NUM_REPLICAS, sr.sortedReplicas(false).size()); verifySortedReplicas(sr); }
SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction); SortedReplicas(Broker broker, Disk disk, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunc, boolean initialize); }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction); SortedReplicas(Broker broker, Disk disk, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunc, boolean initialize); SortedSet<Replica> sortedReplicas(boolean clone); Set<Function<Replica, Boolean>> selectionFunctions(); List<Function<Replica, Integer>> priorityFunctions(); Function<Replica, Double> scoreFunction(); void add(Replica replica); }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction); SortedReplicas(Broker broker, Disk disk, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunc, boolean initialize); SortedSet<Replica> sortedReplicas(boolean clone); Set<Function<Replica, Boolean>> selectionFunctions(); List<Function<Replica, Integer>> priorityFunctions(); Function<Replica, Double> scoreFunction(); void add(Replica replica); }
@Test public void testSelectionFunction() { Broker broker = generateBroker(NUM_REPLICAS); new SortedReplicasHelper().addSelectionFunc(SELECTION_FUNC) .addPriorityFunc(PRIORITY_FUNC) .setScoreFunc(SCORE_FUNC) .trackSortedReplicasFor(SORT_NAME, broker); SortedReplicas sr = broker.trackedSortedReplicas(SORT_NAME); assertEquals(broker.leaderReplicas().size(), sr.sortedReplicas(false).size()); verifySortedReplicas(sr); }
SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction); SortedReplicas(Broker broker, Disk disk, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunc, boolean initialize); }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction); SortedReplicas(Broker broker, Disk disk, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunc, boolean initialize); SortedSet<Replica> sortedReplicas(boolean clone); Set<Function<Replica, Boolean>> selectionFunctions(); List<Function<Replica, Integer>> priorityFunctions(); Function<Replica, Double> scoreFunction(); void add(Replica replica); }
SortedReplicas { SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction) { this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true); } SortedReplicas(Broker broker, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunction); SortedReplicas(Broker broker, Disk disk, Set<Function<Replica, Boolean>> selectionFuncs, List<Function<Replica, Integer>> priorityFuncs, Function<Replica, Double> scoreFunc, boolean initialize); SortedSet<Replica> sortedReplicas(boolean clone); Set<Function<Replica, Boolean>> selectionFunctions(); List<Function<Replica, Integer>> priorityFunctions(); Function<Replica, Double> scoreFunction(); void add(Replica replica); }
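In the selection-function test above the tracked set shrinks to the broker's leader replicas, because selection functions filter before any ordering applies. A self-contained filter-then-sort sketch with a hypothetical Replica record:

import java.util.Comparator;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Stream;

public class SelectionFilterSketch {
    record Replica(boolean leader, double score) {}

    public static void main(String[] args) {
        Function<Replica, Boolean> selectionFunc = Replica::leader; // keep leader replicas only
        List<Replica> tracked = Stream.of(new Replica(true, 2.0), new Replica(false, 1.0), new Replica(true, 0.5))
                                      .filter(selectionFunc::apply)                       // selection happens first...
                                      .sorted(Comparator.comparingDouble(Replica::score)) // ...then score ordering
                                      .toList();
        System.out.println(tracked.size()); // 2: the non-leader replica is never tracked at all
    }
}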
@Test public void testAssignment() { int maxNumPartitionsForTopic = -1; int totalNumPartitions = 0; Set<PartitionInfo> partitions = new HashSet<>(); for (int i = 0; i < NUM_TOPICS; i++) { int randomNumPartitions = 4 * (RANDOM.nextInt(100) + 1); maxNumPartitionsForTopic = Math.max(randomNumPartitions, maxNumPartitionsForTopic); totalNumPartitions += randomNumPartitions; for (int j = 0; j < randomNumPartitions; j++) { partitions.add(new PartitionInfo(TOPIC_PREFIX + i, j, NODE_0, nodes(), nodes())); } } Cluster cluster = new Cluster("cluster", Arrays.asList(nodes()), partitions, Collections.emptySet(), Collections.emptySet()); Metadata metadata = new Metadata(METADATA_REFRESH_BACKOFF, METADATA_EXPIRY_MS, new LogContext(), new ClusterResourceListeners()); Map<String, Set<PartitionInfo>> topicToTopicPartitions = new HashMap<>(partitions.size()); for (PartitionInfo tp : partitions) { topicToTopicPartitions.putIfAbsent(tp.topic(), new HashSet<>()); topicToTopicPartitions.get(tp.topic()).add(tp); } List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(partitions.size()); for (Map.Entry<String, Set<PartitionInfo>> entry : topicToTopicPartitions.entrySet()) { List<MetadataResponse.PartitionMetadata> partitionMetadata = new ArrayList<>(entry.getValue().size()); for (PartitionInfo tp : entry.getValue()) { partitionMetadata.add(new MetadataResponse.PartitionMetadata(Errors.NONE, tp.partition(), NODE_0, Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH), Arrays.asList(nodes()), Arrays.asList(nodes()), Collections.emptyList())); } topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, entry.getKey(), false, partitionMetadata)); } MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), MetadataResponse.NO_CONTROLLER_ID, topicMetadata); metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, 0); MetricSamplerPartitionAssignor assignor = new DefaultMetricSamplerPartitionAssignor(); Set<TopicPartition> assignment = assignor.assignPartitions(metadata.fetch()); int maxAssignedNumPartitionsForFetcher = -1; int minAssignedNumPartitionsForFetcher = Integer.MAX_VALUE; int totalAssignedNumPartitions = 0; maxAssignedNumPartitionsForFetcher = Math.max(maxAssignedNumPartitionsForFetcher, assignment.size()); minAssignedNumPartitionsForFetcher = Math.min(minAssignedNumPartitionsForFetcher, assignment.size()); Set<TopicPartition> uniqueAssignedPartitions = new HashSet<>(assignment); totalAssignedNumPartitions += assignment.size(); assertEquals("Total assigned number of partitions should be " + totalNumPartitions, totalNumPartitions, totalAssignedNumPartitions); assertEquals("Total number of unique assigned partitions should be " + totalNumPartitions, totalNumPartitions, uniqueAssignedPartitions.size()); int avgAssignedPartitionsPerFetcher = totalNumPartitions; assertTrue("In the worst case the max number of partitions assigned to a metric fetchers should not differ by " + "more than the partition number of the biggest topic, which is " + maxNumPartitionsForTopic, maxAssignedNumPartitionsForFetcher - avgAssignedPartitionsPerFetcher <= maxNumPartitionsForTopic); assertTrue("In the worst case the min number of partitions assigned to a metric fetchers should not differ by " + "more than the partition number of the biggest topic, which is " + maxNumPartitionsForTopic, avgAssignedPartitionsPerFetcher - minAssignedNumPartitionsForFetcher <= maxNumPartitionsForTopic); }
@Override public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) { if (numMetricFetchers != SUPPORTED_NUM_METRIC_FETCHER) { throw new IllegalArgumentException("DefaultMetricSamplerPartitionAssignor supports only a single metric fetcher."); } List<Set<TopicPartition>> assignments = new ArrayList<>(); assignments.add(assignPartitions(cluster)); return assignments; }
DefaultMetricSamplerPartitionAssignor implements MetricSamplerPartitionAssignor { @Override public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) { if (numMetricFetchers != SUPPORTED_NUM_METRIC_FETCHER) { throw new IllegalArgumentException("DefaultMetricSamplerPartitionAssignor supports only a single metric fetcher."); } List<Set<TopicPartition>> assignments = new ArrayList<>(); assignments.add(assignPartitions(cluster)); return assignments; } }
DefaultMetricSamplerPartitionAssignor implements MetricSamplerPartitionAssignor { @Override public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) { if (numMetricFetchers != SUPPORTED_NUM_METRIC_FETCHER) { throw new IllegalArgumentException("DefaultMetricSamplerPartitionAssignor supports only a single metric fetcher."); } List<Set<TopicPartition>> assignments = new ArrayList<>(); assignments.add(assignPartitions(cluster)); return assignments; } }
DefaultMetricSamplerPartitionAssignor implements MetricSamplerPartitionAssignor { @Override public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) { if (numMetricFetchers != SUPPORTED_NUM_METRIC_FETCHER) { throw new IllegalArgumentException("DefaultMetricSamplerPartitionAssignor supports only a single metric fetcher."); } List<Set<TopicPartition>> assignments = new ArrayList<>(); assignments.add(assignPartitions(cluster)); return assignments; } @Override void configure(Map<String, ?> configs); @Override List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers); @Override Set<TopicPartition> assignPartitions(Cluster cluster); }
DefaultMetricSamplerPartitionAssignor implements MetricSamplerPartitionAssignor { @Override public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) { if (numMetricFetchers != SUPPORTED_NUM_METRIC_FETCHER) { throw new IllegalArgumentException("DefaultMetricSamplerPartitionAssignor supports only a single metric fetcher."); } List<Set<TopicPartition>> assignments = new ArrayList<>(); assignments.add(assignPartitions(cluster)); return assignments; } @Override void configure(Map<String, ?> configs); @Override List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers); @Override Set<TopicPartition> assignPartitions(Cluster cluster); }
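The default assignor supports exactly one fetcher, so its assignment is simply every partition; the test's max/min bookkeeping only becomes interesting for a multi-fetcher assignor. For intuition about why the test bounds the spread by the largest topic's partition count, here is a hedged sketch of a topic-granular round-robin (invented names; not necessarily the project's algorithm):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class RoundRobinAssignSketch {
    // Whole topics go to fetchers in round-robin order, so any two fetchers can
    // differ by at most one topic's worth of partitions (the bound the test uses).
    static List<List<String>> assign(Map<String, Integer> partitionsByTopic, int numFetchers) {
        List<List<String>> assignments = new ArrayList<>();
        for (int i = 0; i < numFetchers; i++) {
            assignments.add(new ArrayList<>());
        }
        int topicIndex = 0;
        for (Map.Entry<String, Integer> topic : partitionsByTopic.entrySet()) {
            for (int p = 0; p < topic.getValue(); p++) {
                assignments.get(topicIndex % numFetchers).add(topic.getKey() + "-" + p);
            }
            topicIndex++; // advance per topic, not per partition
        }
        return assignments;
    }

    public static void main(String[] args) {
        System.out.println(assign(Map.of("t0", 3, "t1", 2), 2)); // e.g. [[t0-0, t0-1, t0-2], [t1-0, t1-1]]
    }
}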
@Test public void testAggregate() throws NotEnoughValidWindowsException { KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties()); Metadata metadata = getMetadata(Collections.singleton(TP)); KafkaPartitionMetricSampleAggregator metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata); populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator); MetricSampleAggregationResult<String, PartitionEntity> result = metricSampleAggregator.aggregate(metadata.fetch(), Long.MAX_VALUE, new OperationProgress()); Map<PartitionEntity, ValuesAndExtrapolations> valuesAndExtrapolations = result.valuesAndExtrapolations(); assertEquals("The windows should only have one partition", 1, valuesAndExtrapolations.size()); ValuesAndExtrapolations partitionValuesAndExtrapolations = valuesAndExtrapolations.get(PE); assertNotNull(partitionValuesAndExtrapolations); assertEquals(NUM_WINDOWS, partitionValuesAndExtrapolations.metricValues().length()); for (int i = 0; i < NUM_WINDOWS; i++) { assertEquals((NUM_WINDOWS - i) * WINDOW_MS, result.valuesAndExtrapolations().get(PE).window(i)); for (Resource resource : Resource.cachedValues()) { Collection<Short> metricIds = KafkaMetricDef.resourceToMetricIds(resource); double expectedValue = (resource == Resource.DISK ? (NUM_WINDOWS - 1 - i) * 10 + MIN_SAMPLES_PER_WINDOW - 1 : (NUM_WINDOWS - 1 - i) * 10 + (MIN_SAMPLES_PER_WINDOW - 1) / 2.0) / (resource == Resource.CPU ? UNIT_INTERVAL_TO_PERCENTAGE : 1.0) * metricIds.size(); assertEquals("The utilization for " + resource + " should be " + expectedValue, expectedValue, partitionValuesAndExtrapolations.metricValues().valuesForGroup(resource.name(), KafkaMetricDef.commonMetricDef(), true).get(i), 0.01); } } MetadataClient.ClusterAndGeneration clusterAndGeneration = new MetadataClient.ClusterAndGeneration(metadata.fetch(), 1); assertEquals(NUM_WINDOWS, metricSampleAggregator.validWindows(clusterAndGeneration.cluster(), 1.0).size()); Map<Long, Float> monitoredPercentages = metricSampleAggregator.validPartitionRatioByWindows(clusterAndGeneration.cluster()); for (double percentage : monitoredPercentages.values()) { assertEquals(1.0, percentage, 0.0); } assertEquals(NUM_WINDOWS, metricSampleAggregator.availableWindows().size()); }
public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
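The expected utilization asserted in testAggregate can be recomputed by hand. Under the assumption (hypothetical, mirroring what the test utilities appear to generate) that the k-th sample in window index w carries value w * 10 + k, DISK keeps the latest (largest) sample, the other resources take the window mean, and CPU is rescaled by UNIT_INTERVAL_TO_PERCENTAGE (assumed to be 100.0 here):

// Toy recomputation of the expected per-window utilization asserted in
// testAggregate. MIN_SAMPLES_PER_WINDOW and UNIT_INTERVAL_TO_PERCENTAGE
// values are assumptions for illustration, not taken from the test class.
public class ExpectedValueSketch {
  static final double UNIT_INTERVAL_TO_PERCENTAGE = 100.0; // assumed
  static final int MIN_SAMPLES_PER_WINDOW = 4;             // assumed

  static double expected(int windowIndex, boolean isDisk, boolean isCpu, int numMetricIds) {
    double base = windowIndex * 10;
    double value = isDisk
        ? base + MIN_SAMPLES_PER_WINDOW - 1          // latest/largest sample wins
        : base + (MIN_SAMPLES_PER_WINDOW - 1) / 2.0; // mean of samples 0..MIN-1
    return value / (isCpu ? UNIT_INTERVAL_TO_PERCENTAGE : 1.0) * numMetricIds;
  }

  public static void main(String[] args) {
    // Window index 5, a disk-like resource backed by a single metric id.
    System.out.println(expected(5, true, false, 1)); // 53.0
    // Same window, a CPU-like resource: the mean rescaled to a percentage.
    System.out.println(expected(5, false, true, 1)); // 0.515
  }
}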
@Test public void testAggregateWithUpdatedCluster() throws NotEnoughValidWindowsException { KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties()); Metadata metadata = getMetadata(Collections.singleton(TP)); KafkaPartitionMetricSampleAggregator metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata); populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator); TopicPartition tp1 = new TopicPartition(TOPIC0 + "1", 0); Cluster cluster = getCluster(Arrays.asList(TP, tp1)); List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(2); topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, TOPIC0, false, Collections.singletonList(new MetadataResponse.PartitionMetadata( Errors.NONE, PARTITION, NODE_0, Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH), Arrays.asList(nodes()), Arrays.asList(nodes()), Collections.emptyList())))); topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, TOPIC0 + "1", false, Collections.singletonList(new MetadataResponse.PartitionMetadata( Errors.NONE, 0, NODE_0, Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH), Arrays.asList(nodes()), Arrays.asList(nodes()), Collections.emptyList())))); MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), MetadataResponse.NO_CONTROLLER_ID, topicMetadata); metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, 1); Map<PartitionEntity, ValuesAndExtrapolations> aggregateResult = metricSampleAggregator.aggregate(cluster, Long.MAX_VALUE, new OperationProgress()).valuesAndExtrapolations(); assertEquals(1, aggregateResult.size()); assertEquals(NUM_WINDOWS, aggregateResult.get(PE).windows().size()); ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true); MetricSampleAggregationResult<String, PartitionEntity> result = metricSampleAggregator.aggregate(cluster, -1, Long.MAX_VALUE, requirements, new OperationProgress()); aggregateResult = result.valuesAndExtrapolations(); assertNotNull("tp1 should be included because includeAllTopics is set to true", aggregateResult.get(new PartitionEntity(tp1))); Map<Integer, Extrapolation> extrapolations = aggregateResult.get(new PartitionEntity(tp1)).extrapolations(); assertEquals(NUM_WINDOWS, extrapolations.size()); for (int i = 0; i < NUM_WINDOWS; i++) { assertEquals(Extrapolation.NO_VALID_EXTRAPOLATION, extrapolations.get(i)); } }
public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
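testAggregateWithUpdatedCluster relies on the includeAllTopics flag of ModelCompletenessRequirements: a partition that has no samples at all is still reported, with every window flagged NO_VALID_EXTRAPOLATION because there is nothing to borrow from. A toy model of that flagging (not the Cruise Control implementation):

import java.util.HashMap;
import java.util.Map;

// Toy model of how a completely unsampled entity ends up with one
// NO_VALID_EXTRAPOLATION flag per window when includeAllTopics is requested.
// Simplified: real extrapolation also tries neighboring windows first.
public class IncludeAllTopicsSketch {
  enum Extrapolation { AVG_AVAILABLE, AVG_ADJACENT, NO_VALID_EXTRAPOLATION }

  static Map<Integer, Extrapolation> extrapolationsFor(int numWindows, int[] samplesPerWindow) {
    Map<Integer, Extrapolation> result = new HashMap<>();
    for (int w = 0; w < numWindows; w++) {
      if (samplesPerWindow[w] == 0) {
        result.put(w, Extrapolation.NO_VALID_EXTRAPOLATION); // nothing to borrow from
      }
    }
    return result;
  }

  public static void main(String[] args) {
    int numWindows = 20; // assumed NUM_WINDOWS
    Map<Integer, Extrapolation> flags = extrapolationsFor(numWindows, new int[numWindows]);
    System.out.println(flags.size()); // 20: every window is flagged
  }
}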
@Test public void testAggregateWithPartitionExtrapolations() throws NotEnoughValidWindowsException { KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties()); Metadata metadata = getMetadata(Collections.singleton(TP)); KafkaPartitionMetricSampleAggregator metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata); TopicPartition tp1 = new TopicPartition(TOPIC0, 1); Cluster cluster = getCluster(Arrays.asList(TP, tp1)); PartitionEntity pe1 = new PartitionEntity(tp1); List<MetadataResponse.PartitionMetadata> partitionMetadata = Collections.singletonList(new MetadataResponse.PartitionMetadata(Errors.NONE, 1, NODE_0, Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH), Arrays.asList(nodes()), Arrays.asList(nodes()), Collections.emptyList())); List<MetadataResponse.TopicMetadata> topicMetadata = Collections.singletonList( new MetadataResponse.TopicMetadata(Errors.NONE, TOPIC0, false, partitionMetadata)); MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), MetadataResponse.NO_CONTROLLER_ID, topicMetadata); metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, 1); populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator); CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 2, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, pe1, 0, WINDOW_MS, KafkaMetricDef.commonMetricDef()); CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, pe1, NUM_WINDOWS - 1, WINDOW_MS, KafkaMetricDef.commonMetricDef()); MetricSampleAggregationResult<String, PartitionEntity> result = metricSampleAggregator.aggregate(cluster, Long.MAX_VALUE, new OperationProgress()); assertEquals(2, result.valuesAndExtrapolations().size()); assertTrue(result.valuesAndExtrapolations().get(PE).extrapolations().isEmpty()); assertEquals(1, result.valuesAndExtrapolations().get(pe1).extrapolations().size()); assertTrue(result.valuesAndExtrapolations().get(pe1).extrapolations().containsKey(1)); assertEquals((NUM_WINDOWS - 1) * WINDOW_MS, result.valuesAndExtrapolations().get(pe1).window(1)); assertEquals(Extrapolation.AVG_ADJACENT, result.valuesAndExtrapolations().get(pe1).extrapolations().get(1)); }
public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
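The AVG_ADJACENT extrapolation asserted above fills a window that has no samples with the mean of its two neighbors, provided both neighbors have values. A minimal sketch of that fallback (illustrative, not the library code):

import java.util.OptionalDouble;

// Minimal sketch of the AVG_ADJACENT fallback: a window with no samples
// borrows the mean of its immediate neighbors when both are present.
public class AvgAdjacentSketch {
  static OptionalDouble avgAdjacent(double[] windowValues, boolean[] hasSamples, int w) {
    if (w <= 0 || w >= windowValues.length - 1 || !hasSamples[w - 1] || !hasSamples[w + 1]) {
      return OptionalDouble.empty(); // no valid extrapolation possible
    }
    return OptionalDouble.of((windowValues[w - 1] + windowValues[w + 1]) / 2.0);
  }

  public static void main(String[] args) {
    double[] values = {10.0, 0.0, 30.0};
    boolean[] present = {true, false, true};
    System.out.println(avgAdjacent(values, present, 1)); // OptionalDouble[20.0]
  }
}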
@Test public void testFallbackToAvgAvailable() throws NotEnoughValidWindowsException { KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties()); Metadata metadata = getMetadata(Collections.singleton(TP)); KafkaPartitionMetricSampleAggregator metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata); CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, PE, 2, WINDOW_MS, KafkaMetricDef.commonMetricDef()); MetricSampleAggregationResult<String, PartitionEntity> result = metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS, new OperationProgress()); assertEquals(NUM_WINDOWS - 2, result.valuesAndExtrapolations().get(PE).windows().size()); populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW - 2, metricSampleAggregator); result = metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS, new OperationProgress()); int numWindows = result.valuesAndExtrapolations().get(PE).metricValues().length(); assertEquals(NUM_WINDOWS, numWindows); int numExtrapolations = 0; for (Map.Entry<Integer, Extrapolation> entry : result.valuesAndExtrapolations().get(PE).extrapolations().entrySet()) { assertEquals(Extrapolation.AVG_AVAILABLE, entry.getValue()); numExtrapolations++; } assertEquals(2, numExtrapolations); }
public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
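The AVG_AVAILABLE fallback exercised above keeps a window that received some samples, but fewer than MIN_SAMPLES_PER_WINDOW, and values it as the mean of the samples that did arrive while flagging the window as extrapolated. A toy sketch (MIN_SAMPLES_PER_WINDOW assumed to be 4 here):

// Minimal sketch of the AVG_AVAILABLE fallback: a partially filled window
// stays usable; its value is the mean of the samples present. Toy code,
// not the library implementation.
public class AvgAvailableSketch {
  static double avgAvailable(double[] samples) {
    double sum = 0.0;
    for (double s : samples) {
      sum += s;
    }
    return sum / samples.length;
  }

  public static void main(String[] args) {
    int minSamplesPerWindow = 4;               // assumed MIN_SAMPLES_PER_WINDOW
    double[] samples = {4.0, 6.0};             // only 2 of the required 4 arrived
    boolean flaggedAvgAvailable = samples.length < minSamplesPerWindow;
    System.out.println(avgAvailable(samples)); // 5.0
    System.out.println(flaggedAvgAvailable);   // true -> Extrapolation.AVG_AVAILABLE
  }
}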
@Test public void testAvailableWindows() { MetricSampleAggregator<String, IntegerEntity> aggregator = new MetricSampleAggregator<>(NUM_WINDOWS, WINDOW_MS, MIN_SAMPLES_PER_WINDOW, 0, _metricDef); assertTrue(aggregator.availableWindows().isEmpty()); CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY1, 0, WINDOW_MS, _metricDef); assertTrue(aggregator.availableWindows().isEmpty()); CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 2, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY1, 1, WINDOW_MS, _metricDef); List<Long> availableWindows = aggregator.availableWindows(); assertEquals(NUM_WINDOWS - 2, availableWindows.size()); for (int i = 0; i < NUM_WINDOWS - 2; i++) { assertEquals((i + 1) * WINDOW_MS, availableWindows.get(i).longValue()); } }
public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); }
MetricSampleAggregator extends LongGenerationed { public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); } }
MetricSampleAggregator extends LongGenerationed { public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); }
MetricSampleAggregator extends LongGenerationed { public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
MetricSampleAggregator extends LongGenerationed { public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
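The assertions in testAvailableWindows pin down the window bookkeeping: a sample at time t appears to land in the window ending at (t / windowMs + 1) * windowMs, and the newest, still-open window is excluded from availableWindows(). A small self-contained sketch of that mapping, with a hypothetical windowMs:

import java.util.ArrayList;
import java.util.List;

// Illustration of the window mapping the test above implies: the current
// (in-progress) window is never reported as available.
public class WindowIndexSketch {
  static long windowEndFor(long timestampMs, long windowMs) {
    return (timestampMs / windowMs + 1) * windowMs;
  }

  public static void main(String[] args) {
    long windowMs = 100_000L; // hypothetical WINDOW_MS
    List<Long> sampleTimes = List.of(0L, 100_000L, 200_000L, 300_000L);
    long currentWindow = windowEndFor(300_000L, windowMs); // newest sample's window
    List<Long> available = new ArrayList<>();
    for (long t : sampleTimes) {
      long w = windowEndFor(t, windowMs);
      if (w != currentWindow && !available.contains(w)) {
        available.add(w); // collected oldest-first, matching (i + 1) * WINDOW_MS
      }
    }
    System.out.println(available); // [100000, 200000, 300000]
  }
}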
@Test public void testFallbackToAvgAdjacent() throws NotEnoughValidWindowsException { KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties()); TopicPartition anotherTopicPartition = new TopicPartition("AnotherTopic", 1); PartitionEntity anotherPartitionEntity = new PartitionEntity(anotherTopicPartition); Metadata metadata = getMetadata(Arrays.asList(TP, anotherTopicPartition)); KafkaPartitionMetricSampleAggregator metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata); populateSampleAggregator(NUM_WINDOWS, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator); CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, PE, NUM_WINDOWS, WINDOW_MS, KafkaMetricDef.commonMetricDef()); CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, anotherPartitionEntity, NUM_WINDOWS + 1, WINDOW_MS, KafkaMetricDef.commonMetricDef()); CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, PE, NUM_WINDOWS + 2, WINDOW_MS, KafkaMetricDef.commonMetricDef()); MetricSampleAggregationResult<String, PartitionEntity> result = metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS * 2, new OperationProgress()); int numWindows = result.valuesAndExtrapolations().get(PE).metricValues().length(); assertEquals(NUM_WINDOWS, numWindows); int numExtrapolations = 0; for (Map.Entry<Integer, Extrapolation> entry : result.valuesAndExtrapolations().get(PE).extrapolations().entrySet()) { assertEquals(Extrapolation.AVG_ADJACENT, entry.getValue()); numExtrapolations++; } assertEquals(1, numExtrapolations); }
public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
@Test public void testTooManyFlaws() throws NotEnoughValidWindowsException { KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties()); Metadata metadata = getMetadata(Collections.singleton(TP)); KafkaPartitionMetricSampleAggregator metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata); CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 2, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator, PE, 3, WINDOW_MS, KafkaMetricDef.commonMetricDef()); MetricSampleAggregationResult<String, PartitionEntity> result = metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS, new OperationProgress()); assertEquals(NUM_WINDOWS - 3, result.valuesAndExtrapolations().get(PE).windows().size()); }
public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
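testTooManyFlaws suggests a cutoff: once a partition needs more extrapolated windows than the aggregator allows, the flawed windows are dropped from the result rather than patched. A toy version of that accounting (the real logic lives in MetricSampleAggregator):

// Toy cutoff logic: if an entity needs more extrapolated windows than
// maxAllowedExtrapolations, the flawed windows are excluded from the result
// instead of filled in. Illustrative only.
public class TooManyFlawsSketch {
  static int usableWindows(int totalWindows, int flawedWindows, int maxAllowedExtrapolations) {
    return flawedWindows <= maxAllowedExtrapolations
        ? totalWindows                  // flaws are extrapolated and kept
        : totalWindows - flawedWindows; // flawed windows are dropped
  }

  public static void main(String[] args) {
    // 20 windows, 3 of them unsampled, at most 2 extrapolations allowed.
    System.out.println(usableWindows(20, 3, 2)); // 17, i.e. NUM_WINDOWS - 3
  }
}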
@Test public void testNotEnoughWindows() { KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties()); Metadata metadata = getMetadata(Collections.singleton(TP)); KafkaPartitionMetricSampleAggregator metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata); populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator); try { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(NUM_WINDOWS, 0.0, false); metricSampleAggregator.aggregate(metadata.fetch(), -1L, (NUM_WINDOWS - 1) * WINDOW_MS - 1, requirements, new OperationProgress()); fail("Should throw NotEnoughValidWindowsException"); } catch (NotEnoughValidWindowsException nse) { /* expected: the shortened range cannot satisfy NUM_WINDOWS valid windows */ } }
public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress) throws NotEnoughValidWindowsException { ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false); return aggregate(cluster, -1L, now, requirements, operationProgress); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
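The completeness check behind NotEnoughValidWindowsException amounts to counting the valid windows that fall inside the requested [from, to] range and failing fast when the count is below the requirement's minimum. A self-contained sketch under that assumption (names are illustrative, not the library API):

// Toy version of the completeness check: too few valid windows in range
// aborts the aggregation.
public class RequirementCheckSketch {
  static class NotEnoughValidWindowsException extends Exception {
    NotEnoughValidWindowsException(String msg) { super(msg); }
  }

  static void checkWindows(long[] validWindowEndTimes, long from, long to, int minRequired)
      throws NotEnoughValidWindowsException {
    int inRange = 0;
    for (long w : validWindowEndTimes) {
      if (w > from && w <= to) {
        inRange++;
      }
    }
    if (inRange < minRequired) {
      throw new NotEnoughValidWindowsException(
          "Only " + inRange + " valid windows in range, but " + minRequired + " required.");
    }
  }

  public static void main(String[] args) {
    long[] windows = {100, 200, 300, 400};
    try {
      checkWindows(windows, -1, 400, 4); // passes: all four windows in range
      checkWindows(windows, -1, 299, 4); // only two windows in range -> throws
    } catch (NotEnoughValidWindowsException e) {
      System.out.println(e.getMessage()); // Only 2 valid windows in range, but 4 required.
    }
  }
}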
@Test public void testValidWindows() { TestContext ctx = setupScenario1(); KafkaPartitionMetricSampleAggregator aggregator = ctx.aggregator(); MetadataClient.ClusterAndGeneration clusterAndGeneration = ctx.clusterAndGeneration(0); SortedSet<Long> validWindows = aggregator.validWindows(clusterAndGeneration.cluster(), 1.0); assertEquals(NUM_WINDOWS, validWindows.size()); assertValidWindows(validWindows, NUM_WINDOWS, Collections.emptySet()); }
public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
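validWindows() is driven by per-window completeness: a window counts as valid when the ratio of monitored partitions meets minMonitoredPartitionsPercentage, which is also what validPartitionRatioByWindows() reports. A sketch of that filter, mirroring what the tests assert rather than the library internals:

import java.util.Map;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

// Sketch of the valid-window filter: keep the windows whose monitored
// partition ratio meets the requested minimum.
public class ValidWindowsSketch {
  static SortedSet<Long> validWindows(SortedMap<Long, Float> ratioByWindow, double minRatio) {
    SortedSet<Long> valid = new TreeSet<>();
    for (Map.Entry<Long, Float> e : ratioByWindow.entrySet()) {
      if (e.getValue() >= minRatio) {
        valid.add(e.getKey());
      }
    }
    return valid;
  }

  public static void main(String[] args) {
    SortedMap<Long, Float> ratios = new TreeMap<>();
    ratios.put(100L, 1.0f);
    ratios.put(200L, 0.5f); // half the partitions monitored in this window
    ratios.put(300L, 1.0f);
    System.out.println(validWindows(ratios, 1.0)); // [100, 300]
    System.out.println(validWindows(ratios, 0.5)); // [100, 200, 300]
  }
}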
@Test public void testValidWindowsWithInvalidPartitions() { TestContext ctx = setupScenario2(); KafkaPartitionMetricSampleAggregator aggregator = ctx.aggregator(); MetadataClient.ClusterAndGeneration clusterAndGeneration = ctx.clusterAndGeneration(0); SortedSet<Long> validWindows = aggregator.validWindows(clusterAndGeneration.cluster(), 1.0); assertEquals("Should have three invalid windows.", NUM_WINDOWS - 3, validWindows.size()); assertValidWindows(validWindows, NUM_WINDOWS - 1, Arrays.asList(6, 7)); assertEquals(NUM_WINDOWS, aggregator.validWindows(clusterAndGeneration.cluster(), 0.5).size()); }
public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
@Test public void testValidWindowWithDifferentInvalidPartitions() { TestContext ctx = setupScenario3(); KafkaPartitionMetricSampleAggregator aggregator = ctx.aggregator(); MetadataClient.ClusterAndGeneration clusterAndGeneration = ctx.clusterAndGeneration(0); SortedSet<Long> validWindows = aggregator.validWindows(clusterAndGeneration.cluster(), 0.75); assertEquals("Should have two invalid windows.", NUM_WINDOWS - 2, validWindows.size()); assertValidWindows(validWindows, NUM_WINDOWS, Arrays.asList(6, 7)); }
public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
@Test public void testValidWindowsWithTooManyExtrapolations() { TestContext ctx = setupScenario4(); KafkaPartitionMetricSampleAggregator aggregator = ctx.aggregator(); MetadataClient.ClusterAndGeneration clusterAndGeneration = ctx.clusterAndGeneration(0); SortedSet<Long> validWindows = aggregator.validWindows(clusterAndGeneration.cluster(), 0.75); assertEquals("Should have two invalid windows.", NUM_WINDOWS - 2, validWindows.size()); assertValidWindows(validWindows, NUM_WINDOWS, Arrays.asList(6, 7)); }
public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
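The rows above all exercise KafkaPartitionMetricSampleAggregator.validWindows, which asks the completeness machinery for the set of valid window indices and then maps each index to a window timestamp via windowIndicesToWindows(..., _windowMs). Below is a minimal standalone sketch of that index-to-timestamp step, assuming a window's timestamp is simply index * windowMs (the real indexing may differ); the 20-window / 1-second numbers and the dropped indices 6 and 7 echo the test, everything else is illustrative.

```java
import java.util.SortedSet;
import java.util.TreeSet;

public final class WindowIndexDemo {
    // Hypothetical helper mirroring windowIndicesToWindows: map each valid
    // window index to its window timestamp, assuming window = index * windowMs.
    static SortedSet<Long> windowIndicesToWindows(SortedSet<Long> indices, long windowMs) {
        SortedSet<Long> windows = new TreeSet<>();
        for (long index : indices) {
            windows.add(index * windowMs);
        }
        return windows;
    }

    public static void main(String[] args) {
        SortedSet<Long> validIndices = new TreeSet<>();
        for (long i = 1; i <= 20; i++) {
            if (i != 6 && i != 7) { // indices 6 and 7 had too many extrapolations
                validIndices.add(i);
            }
        }
        // With a 1-second window this yields 18 window timestamps, skipping 6000 and 7000.
        System.out.println(windowIndicesToWindows(validIndices, 1000L));
    }
}
```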
@Test public void testMonitoredPercentage() { TestContext ctx = setupScenario1(); KafkaPartitionMetricSampleAggregator aggregator = ctx.aggregator(); MetadataClient.ClusterAndGeneration clusterAndGeneration = ctx.clusterAndGeneration(0); assertEquals(1.0, aggregator.monitoredPercentage(clusterAndGeneration.cluster()), 0.01); ctx = setupScenario2(); aggregator = ctx.aggregator(); clusterAndGeneration = ctx.clusterAndGeneration(0); assertEquals(0.75, aggregator.monitoredPercentage(clusterAndGeneration.cluster()), 0.01); ctx = setupScenario3(); aggregator = ctx.aggregator(); clusterAndGeneration = ctx.clusterAndGeneration(0); assertEquals((double) 4 / 6, aggregator.monitoredPercentage(clusterAndGeneration.cluster()), 0.01); ctx = setupScenario4(); aggregator = ctx.aggregator(); clusterAndGeneration = ctx.clusterAndGeneration(0); assertEquals((double) 4 / 6, aggregator.monitoredPercentage(clusterAndGeneration.cluster()), 0.01); }
public double monitoredPercentage(Cluster cluster) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return completeness.validEntityRatio(); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public double monitoredPercentage(Cluster cluster) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return completeness.validEntityRatio(); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public double monitoredPercentage(Cluster cluster) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return completeness.validEntityRatio(); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public double monitoredPercentage(Cluster cluster) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return completeness.validEntityRatio(); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public double monitoredPercentage(Cluster cluster) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return completeness.validEntityRatio(); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
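monitoredPercentage delegates to MetricSampleCompleteness.validEntityRatio() with a zero minimum-entity threshold, i.e. it reports the fraction of tracked partitions whose samples are usable. A minimal sketch of that ratio, using hypothetical partition names; the 4-of-6 split mirrors the 4/6 expected in scenarios 3 and 4 above.

```java
import java.util.Map;

public final class MonitoredPercentageDemo {
    // Sketch of a valid-entity ratio: the fraction of tracked partitions whose
    // samples are usable, which is what monitoredPercentage ultimately reports
    // via MetricSampleCompleteness.validEntityRatio().
    static double validEntityRatio(Map<String, Boolean> partitionValidity) {
        long valid = partitionValidity.values().stream().filter(v -> v).count();
        return partitionValidity.isEmpty() ? 0.0 : (double) valid / partitionValidity.size();
    }

    public static void main(String[] args) {
        // 4 of 6 partitions valid, matching scenarios 3 and 4 in the test above.
        Map<String, Boolean> validity = Map.of(
            "t0-0", true, "t0-1", true, "t1-0", true, "t1-1", true,
            "t2-0", false, "t2-1", false);
        System.out.printf("monitored: %.2f%n", validEntityRatio(validity)); // ~0.67
    }
}
```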
@Test public void testMonitoredPercentagesByWindows() { TestContext ctx = setupScenario1(); KafkaPartitionMetricSampleAggregator aggregator = ctx.aggregator(); MetadataClient.ClusterAndGeneration clusterAndGeneration = ctx.clusterAndGeneration(0); Map<Long, Float> percentages = aggregator.validPartitionRatioByWindows(clusterAndGeneration.cluster()); assertEquals(NUM_WINDOWS, percentages.size()); for (Map.Entry<Long, Float> entry : percentages.entrySet()) { assertEquals(1.0, entry.getValue(), 0.01); } ctx = setupScenario2(); aggregator = ctx.aggregator(); clusterAndGeneration = ctx.clusterAndGeneration(0); percentages = aggregator.validPartitionRatioByWindows(clusterAndGeneration.cluster()); assertEquals(NUM_WINDOWS, percentages.size()); for (Map.Entry<Long, Float> entry : percentages.entrySet()) { long window = entry.getKey(); if (window == 6000 || window == 7000 || window == 20000) { assertEquals(0.5, entry.getValue(), 0.01); } else { assertEquals(1.0, entry.getValue(), 0.01); } } ctx = setupScenario3(); aggregator = ctx.aggregator(); clusterAndGeneration = ctx.clusterAndGeneration(0); percentages = aggregator.validPartitionRatioByWindows(clusterAndGeneration.cluster()); assertEquals(NUM_WINDOWS, percentages.size()); for (Map.Entry<Long, Float> entry : percentages.entrySet()) { long window = entry.getKey(); if (window == 6000 || window == 7000 || window == 18000 || window == 19000) { assertEquals((double) 4 / 6, entry.getValue(), 0.01); } else { assertEquals(1.0, entry.getValue(), 0.01); } } ctx = setupScenario4(); aggregator = ctx.aggregator(); clusterAndGeneration = ctx.clusterAndGeneration(0); percentages = aggregator.validPartitionRatioByWindows(clusterAndGeneration.cluster()); assertEquals(NUM_WINDOWS, percentages.size()); for (Map.Entry<Long, Float> entry : percentages.entrySet()) { long window = entry.getKey(); if (window == 6000 || window == 7000) { assertEquals((double) 2 / 6, entry.getValue(), 0.01); } else { assertEquals((double) 4 / 6, entry.getValue(), 0.01); } } }
public SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validEntityRatioWithGroupGranularityByWindowIndex(), _windowMs); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validEntityRatioWithGroupGranularityByWindowIndex(), _windowMs); } }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validEntityRatioWithGroupGranularityByWindowIndex(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validEntityRatioWithGroupGranularityByWindowIndex(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
KafkaPartitionMetricSampleAggregator extends MetricSampleAggregator<String, PartitionEntity> { public SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster) { AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0, 0.0, 1, _maxAllowedExtrapolationsPerPartition, allPartitions(cluster), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options); return windowIndicesToWindows(completeness.validEntityRatioWithGroupGranularityByWindowIndex(), _windowMs); } KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata); boolean addSample(PartitionMetricSample sample); boolean addSample(PartitionMetricSample sample, boolean leaderValidation); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress); MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements, OperationProgress operationProgress); MetricSampleCompleteness<String, PartitionEntity> completeness(Cluster cluster, long from, long to, ModelCompletenessRequirements requirements); SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage); double monitoredPercentage(Cluster cluster); SortedMap<Long, Float> validPartitionRatioByWindows(Cluster cluster); }
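validPartitionRatioByWindows returns the same kind of ratio, but broken out per window. The sketch below is a hypothetical reconstruction: it keys ratios by an assumed window timestamp of index * windowMs and reproduces scenario 4 above, where windows 6 and 7 have only 2 of 6 valid partitions.

```java
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

public final class PerWindowRatioDemo {
    // Hypothetical sketch: per-window valid-partition ratio keyed by window
    // timestamp, analogous to validPartitionRatioByWindows.
    static SortedMap<Long, Float> ratioByWindow(Map<Long, Integer> validByIndex,
                                                int totalPartitions, long windowMs) {
        SortedMap<Long, Float> result = new TreeMap<>();
        validByIndex.forEach((index, valid) ->
            result.put(index * windowMs, (float) valid / totalPartitions));
        return result;
    }

    public static void main(String[] args) {
        // Windows 6 and 7 only have 2 of 6 partitions valid; others have 4 of 6.
        Map<Long, Integer> validByIndex = new TreeMap<>();
        for (long i = 1; i <= 20; i++) {
            validByIndex.put(i, (i == 6 || i == 7) ? 2 : 4);
        }
        System.out.println(ratioByWindow(validByIndex, 6, 1000L));
    }
}
```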
@Test public void testMeetCompletenessRequirements() { TestContext context = prepareContext(); LoadMonitor loadMonitor = context.loadmonitor(); KafkaPartitionMetricSampleAggregator aggregator = context.aggregator(); ModelCompletenessRequirements requirements1 = new ModelCompletenessRequirements(1, 1.0, false); ModelCompletenessRequirements requirements2 = new ModelCompletenessRequirements(1, 0.5, false); ModelCompletenessRequirements requirements3 = new ModelCompletenessRequirements(2, 1.0, false); ModelCompletenessRequirements requirements4 = new ModelCompletenessRequirements(2, 0.5, false); CruiseControlUnitTestUtils.populateSampleAggregator(2, 4, aggregator, PE_T0P0, 0, WINDOW_MS, METRIC_DEF); CruiseControlUnitTestUtils.populateSampleAggregator(2, 4, aggregator, PE_T0P1, 0, WINDOW_MS, METRIC_DEF); CruiseControlUnitTestUtils.populateSampleAggregator(2, 4, aggregator, PE_T1P0, 0, WINDOW_MS, METRIC_DEF); MetadataClient.ClusterAndGeneration clusterAndGeneration = loadMonitor.refreshClusterAndGeneration(); assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements1)); assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements2)); assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements3)); assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements4)); CruiseControlUnitTestUtils.populateSampleAggregator(1, 4, aggregator, PE_T0P0, 2, WINDOW_MS, METRIC_DEF); CruiseControlUnitTestUtils.populateSampleAggregator(1, 4, aggregator, PE_T0P1, 2, WINDOW_MS, METRIC_DEF); CruiseControlUnitTestUtils.populateSampleAggregator(1, 4, aggregator, PE_T1P0, 2, WINDOW_MS, METRIC_DEF); CruiseControlUnitTestUtils.populateSampleAggregator(1, 1, aggregator, PE_T1P1, 2, WINDOW_MS, METRIC_DEF); clusterAndGeneration = loadMonitor.refreshClusterAndGeneration(); assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements1)); assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements2)); assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements3)); assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements4)); CruiseControlUnitTestUtils.populateSampleAggregator(1, 1, aggregator, PE_T1P1, 0, WINDOW_MS, METRIC_DEF); clusterAndGeneration = loadMonitor.refreshClusterAndGeneration(); assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements1)); assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements2)); assertFalse(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements3)); assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements4)); CruiseControlUnitTestUtils.populateSampleAggregator(1, 3, aggregator, PE_T1P1, 1, WINDOW_MS, METRIC_DEF); clusterAndGeneration = loadMonitor.refreshClusterAndGeneration(); assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements1)); assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements2)); assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements3)); assertTrue(loadMonitor.meetCompletenessRequirements(clusterAndGeneration.cluster(), requirements4)); }
public boolean meetCompletenessRequirements(Cluster cluster, ModelCompletenessRequirements requirements) { int numValidWindows = _partitionMetricSampleAggregator.validWindows(cluster, requirements.minMonitoredPartitionsPercentage()).size(); int requiredNumValidWindows = requirements.minRequiredNumWindows(); return numValidWindows >= requiredNumValidWindows; }
LoadMonitor { public boolean meetCompletenessRequirements(Cluster cluster, ModelCompletenessRequirements requirements) { int numValidWindows = _partitionMetricSampleAggregator.validWindows(cluster, requirements.minMonitoredPartitionsPercentage()).size(); int requiredNumValidWindows = requirements.minRequiredNumWindows(); return numValidWindows >= requiredNumValidWindows; } }
LoadMonitor { public boolean meetCompletenessRequirements(Cluster cluster, ModelCompletenessRequirements requirements) { int numValidWindows = _partitionMetricSampleAggregator.validWindows(cluster, requirements.minMonitoredPartitionsPercentage()).size(); int requiredNumValidWindows = requirements.minRequiredNumWindows(); return numValidWindows >= requiredNumValidWindows; } LoadMonitor(KafkaCruiseControlConfig config, Time time, MetricRegistry dropwizardMetricRegistry, MetricDef metricDef); LoadMonitor(KafkaCruiseControlConfig config, MetadataClient metadataClient, AdminClient adminClient, Time time, MetricRegistry dropwizardMetricRegistry, MetricDef metricDef); }
LoadMonitor { public boolean meetCompletenessRequirements(Cluster cluster, ModelCompletenessRequirements requirements) { int numValidWindows = _partitionMetricSampleAggregator.validWindows(cluster, requirements.minMonitoredPartitionsPercentage()).size(); int requiredNumValidWindows = requirements.minRequiredNumWindows(); return numValidWindows >= requiredNumValidWindows; } LoadMonitor(KafkaCruiseControlConfig config, Time time, MetricRegistry dropwizardMetricRegistry, MetricDef metricDef); LoadMonitor(KafkaCruiseControlConfig config, MetadataClient metadataClient, AdminClient adminClient, Time time, MetricRegistry dropwizardMetricRegistry, MetricDef metricDef); void startUp(); void shutdown(); LoadMonitorState state(Cluster kafkaCluster); TopicConfigProvider topicConfigProvider(); LoadMonitorTaskRunner.LoadMonitorTaskRunnerState taskRunnerState(); void bootstrap(long startMs, long endMs, boolean clearMetrics); void bootstrap(long startMs, boolean clearMetrics); void bootstrap(boolean clearMetrics); void train(long startMs, long endMs); Cluster kafkaCluster(); void pauseMetricSampling(String reason, boolean forcePauseSampling); void resumeMetricSampling(String reason); MetricSampler.SamplingMode samplingMode(); void setSamplingMode(MetricSampler.SamplingMode samplingMode); AutoCloseableSemaphore acquireForModelGeneration(OperationProgress operationProgress); Map<BrokerEntity, ValuesAndExtrapolations> currentBrokerMetricValues(); Map<PartitionEntity, ValuesAndExtrapolations> currentPartitionMetricValues(); ClusterModel clusterModel(long now, ModelCompletenessRequirements requirements, boolean allowCapacityEstimation, OperationProgress operationProgress); ClusterModel clusterModel(long from, long to, ModelCompletenessRequirements requirements, boolean allowCapacityEstimation, OperationProgress operationProgress); ClusterModel clusterCapacity(); ClusterModel clusterModel(long from, long to, ModelCompletenessRequirements requirements, boolean populateReplicaPlacementInfo, boolean allowCapacityEstimation, OperationProgress operationProgress); ModelGeneration clusterModelGeneration(); synchronized BrokerStats cachedBrokerLoadStats(boolean allowCapacityEstimation); Set<Integer> brokersWithReplicas(long timeout); MetadataClient.ClusterAndGeneration refreshClusterAndGeneration(); boolean meetCompletenessRequirements(Cluster cluster, ModelCompletenessRequirements requirements); boolean meetCompletenessRequirements(ModelCompletenessRequirements requirements); MetricSampleAggregationResult<String, BrokerEntity> brokerMetrics(); Set<Integer> deadBrokersWithReplicas(long timeout); Set<Integer> brokersWithOfflineReplicas(long timeout); long lastUpdateMs(); }
LoadMonitor { public boolean meetCompletenessRequirements(Cluster cluster, ModelCompletenessRequirements requirements) { int numValidWindows = _partitionMetricSampleAggregator.validWindows(cluster, requirements.minMonitoredPartitionsPercentage()).size(); int requiredNumValidWindows = requirements.minRequiredNumWindows(); return numValidWindows >= requiredNumValidWindows; } LoadMonitor(KafkaCruiseControlConfig config, Time time, MetricRegistry dropwizardMetricRegistry, MetricDef metricDef); LoadMonitor(KafkaCruiseControlConfig config, MetadataClient metadataClient, AdminClient adminClient, Time time, MetricRegistry dropwizardMetricRegistry, MetricDef metricDef); void startUp(); void shutdown(); LoadMonitorState state(Cluster kafkaCluster); TopicConfigProvider topicConfigProvider(); LoadMonitorTaskRunner.LoadMonitorTaskRunnerState taskRunnerState(); void bootstrap(long startMs, long endMs, boolean clearMetrics); void bootstrap(long startMs, boolean clearMetrics); void bootstrap(boolean clearMetrics); void train(long startMs, long endMs); Cluster kafkaCluster(); void pauseMetricSampling(String reason, boolean forcePauseSampling); void resumeMetricSampling(String reason); MetricSampler.SamplingMode samplingMode(); void setSamplingMode(MetricSampler.SamplingMode samplingMode); AutoCloseableSemaphore acquireForModelGeneration(OperationProgress operationProgress); Map<BrokerEntity, ValuesAndExtrapolations> currentBrokerMetricValues(); Map<PartitionEntity, ValuesAndExtrapolations> currentPartitionMetricValues(); ClusterModel clusterModel(long now, ModelCompletenessRequirements requirements, boolean allowCapacityEstimation, OperationProgress operationProgress); ClusterModel clusterModel(long from, long to, ModelCompletenessRequirements requirements, boolean allowCapacityEstimation, OperationProgress operationProgress); ClusterModel clusterCapacity(); ClusterModel clusterModel(long from, long to, ModelCompletenessRequirements requirements, boolean populateReplicaPlacementInfo, boolean allowCapacityEstimation, OperationProgress operationProgress); ModelGeneration clusterModelGeneration(); synchronized BrokerStats cachedBrokerLoadStats(boolean allowCapacityEstimation); Set<Integer> brokersWithReplicas(long timeout); MetadataClient.ClusterAndGeneration refreshClusterAndGeneration(); boolean meetCompletenessRequirements(Cluster cluster, ModelCompletenessRequirements requirements); boolean meetCompletenessRequirements(ModelCompletenessRequirements requirements); MetricSampleAggregationResult<String, BrokerEntity> brokerMetrics(); Set<Integer> deadBrokersWithReplicas(long timeout); Set<Integer> brokersWithOfflineReplicas(long timeout); long lastUpdateMs(); }
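The LoadMonitor check itself is a simple threshold comparison: count the windows whose monitored-partition coverage meets the requirement's percentage, then compare against the required window count. A minimal sketch, with per-window coverages as plain doubles (the real code derives them from validWindows as shown above):

```java
public final class CompletenessCheckDemo {
    // Minimal sketch of meetCompletenessRequirements: the monitor meets a
    // requirement when the number of windows that individually satisfy the
    // minimum monitored-partition percentage reaches the required window count.
    static boolean meetsRequirements(double[] perWindowCoverage,
                                     int minRequiredNumWindows,
                                     double minMonitoredPartitionsPercentage) {
        int validWindows = 0;
        for (double coverage : perWindowCoverage) {
            if (coverage >= minMonitoredPartitionsPercentage) {
                validWindows++;
            }
        }
        return validWindows >= minRequiredNumWindows;
    }

    public static void main(String[] args) {
        double[] coverage = {0.75, 0.5}; // two windows: 3/4 and 2/4 partitions monitored
        System.out.println(meetsRequirements(coverage, 1, 1.0)); // false, like requirements1
        System.out.println(meetsRequirements(coverage, 1, 0.5)); // true,  like requirements2
        System.out.println(meetsRequirements(coverage, 2, 0.5)); // true,  like requirements4
    }
}
```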
@Test public void testAddSamplesWithLargeInterval() { MetricSampleAggregator<String, IntegerEntity> aggregator = new MetricSampleAggregator<>(NUM_WINDOWS, WINDOW_MS, MIN_SAMPLES_PER_WINDOW, 0, _metricDef); CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY1, 0, WINDOW_MS, _metricDef); CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY1, 4 * NUM_WINDOWS, WINDOW_MS, _metricDef); List<Long> availableWindows = aggregator.availableWindows(); assertEquals(NUM_WINDOWS, availableWindows.size()); for (int i = 0; i < NUM_WINDOWS; i++) { assertEquals((i + 4 * NUM_WINDOWS) * WINDOW_MS, availableWindows.get(i).longValue()); } }
public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); }
MetricSampleAggregator extends LongGenerationed { public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); } }
MetricSampleAggregator extends LongGenerationed { public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); }
MetricSampleAggregator extends LongGenerationed { public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
MetricSampleAggregator extends LongGenerationed { public List<Long> availableWindows() { return getWindowList(_oldestWindowIndex, _currentWindowIndex - 1); } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
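availableWindows lists every completed window from the oldest retained index up to, but excluding, the current in-progress window, which is why the test sees only the newest NUM_WINDOWS windows after the large sampling gap. A standalone sketch, again assuming window timestamp = index * windowMs:

```java
import java.util.ArrayList;
import java.util.List;

public final class AvailableWindowsDemo {
    // Sketch of availableWindows(): enumerate completed windows from the
    // oldest retained index up to, but excluding, the current window index.
    static List<Long> availableWindows(long oldestWindowIndex, long currentWindowIndex,
                                       long windowMs) {
        List<Long> windows = new ArrayList<>();
        for (long i = oldestWindowIndex; i < currentWindowIndex; i++) {
            windows.add(i * windowMs);
        }
        return windows;
    }

    public static void main(String[] args) {
        // After samples arrive 4 * NUM_WINDOWS window-lengths later, only the
        // newest NUM_WINDOWS completed windows are retained, matching the test:
        // (i + 4 * NUM_WINDOWS) * WINDOW_MS for i in [0, NUM_WINDOWS).
        int numWindows = 20;
        long windowMs = 1000L;
        System.out.println(availableWindows(4L * numWindows, 5L * numWindows, windowMs));
    }
}
```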
@Test public void testMetadataChanged() { Node node0 = new Node(0, "localhost", 100, "rack0"); Node node1 = new Node(1, "localhost", 100, "rack1"); Node node2 = new Node(2, "localhost", 100, "rack2"); Node[] nodesWithOrder1 = {node0, node1}; Node[] nodesWithOrder2 = {node1, node0}; Node[] nodes2 = {node0, node2}; String topic0 = "topic0"; String topic1 = "topic1"; String topic2 = "topic2"; PartitionInfo t0p0 = new PartitionInfo(topic0, 0, node0, nodesWithOrder1, nodesWithOrder2); PartitionInfo t0p1 = new PartitionInfo(topic0, 1, node1, nodesWithOrder1, nodesWithOrder2); PartitionInfo t1p0 = new PartitionInfo(topic1, 0, node2, nodesWithOrder1, nodesWithOrder2); PartitionInfo t1p1 = new PartitionInfo(topic1, 1, node0, nodesWithOrder1, nodesWithOrder2); Set<PartitionInfo> partitions1 = new HashSet<>(Arrays.asList(t0p0, t0p1, t1p0, t1p1)); Cluster cluster1 = new Cluster("cluster", Arrays.asList(node0, node1, node2), partitions1, Collections.emptySet(), Collections.emptySet()); PartitionInfo t2p0 = new PartitionInfo(topic2, 0, node1, nodesWithOrder1, nodesWithOrder2); Cluster cluster2 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(topic2, 0), t2p0)); PartitionInfo t0p2 = new PartitionInfo(topic0, 2, node1, nodesWithOrder1, nodesWithOrder2); Cluster cluster3 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(topic2, 2), t0p2)); PartitionInfo t0p0DifferentOrder = new PartitionInfo(topic0, 0, node0, nodesWithOrder2, nodesWithOrder2); Cluster cluster4 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(topic0, 0), t0p0DifferentOrder)); PartitionInfo t0p0DifferentAssignment = new PartitionInfo(topic0, 0, node0, nodes2, nodesWithOrder2); Cluster cluster5 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(topic0, 0), t0p0DifferentAssignment)); PartitionInfo t0p0DifferentLeader = new PartitionInfo(topic0, 0, node1, nodesWithOrder1, nodesWithOrder2); Cluster cluster6 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(topic0, 0), t0p0DifferentLeader)); PartitionInfo t0p0DifferentIsr = new PartitionInfo(topic0, 0, node0, nodesWithOrder1, new Node[]{node0}); Cluster cluster7 = cluster1.withPartitions(Collections.singletonMap(new TopicPartition(topic0, 0), t0p0DifferentIsr)); assertTrue(MonitorUtils.metadataChanged(cluster1, cluster2)); assertTrue(MonitorUtils.metadataChanged(cluster1, cluster3)); assertTrue(MonitorUtils.metadataChanged(cluster1, cluster4)); assertTrue(MonitorUtils.metadataChanged(cluster1, cluster5)); assertTrue(MonitorUtils.metadataChanged(cluster1, cluster6)); assertFalse(MonitorUtils.metadataChanged(cluster1, cluster7)); }
public static boolean metadataChanged(Cluster prev, Cluster curr) { Set<Node> prevNodeSet = new HashSet<>(prev.nodes()); if (prevNodeSet.size() != curr.nodes().size()) { return true; } prevNodeSet.removeAll(curr.nodes()); if (!prevNodeSet.isEmpty()) { return true; } if (!prev.topics().equals(curr.topics())) { return true; } for (String topic : prev.topics()) { if (!prev.partitionCountForTopic(topic).equals(curr.partitionCountForTopic(topic))) { return true; } for (PartitionInfo prevPartInfo : prev.partitionsForTopic(topic)) { PartitionInfo currPartInfo = curr.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition())); if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) { return true; } } } return false; }
MonitorUtils { public static boolean metadataChanged(Cluster prev, Cluster curr) { Set<Node> prevNodeSet = new HashSet<>(prev.nodes()); if (prevNodeSet.size() != curr.nodes().size()) { return true; } prevNodeSet.removeAll(curr.nodes()); if (!prevNodeSet.isEmpty()) { return true; } if (!prev.topics().equals(curr.topics())) { return true; } for (String topic : prev.topics()) { if (!prev.partitionCountForTopic(topic).equals(curr.partitionCountForTopic(topic))) { return true; } for (PartitionInfo prevPartInfo : prev.partitionsForTopic(topic)) { PartitionInfo currPartInfo = curr.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition())); if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) { return true; } } } return false; } }
MonitorUtils { public static boolean metadataChanged(Cluster prev, Cluster curr) { Set<Node> prevNodeSet = new HashSet<>(prev.nodes()); if (prevNodeSet.size() != curr.nodes().size()) { return true; } prevNodeSet.removeAll(curr.nodes()); if (!prevNodeSet.isEmpty()) { return true; } if (!prev.topics().equals(curr.topics())) { return true; } for (String topic : prev.topics()) { if (!prev.partitionCountForTopic(topic).equals(curr.partitionCountForTopic(topic))) { return true; } for (PartitionInfo prevPartInfo : prev.partitionsForTopic(topic)) { PartitionInfo currPartInfo = curr.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition())); if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) { return true; } } } return false; } private MonitorUtils(); }
MonitorUtils { public static boolean metadataChanged(Cluster prev, Cluster curr) { Set<Node> prevNodeSet = new HashSet<>(prev.nodes()); if (prevNodeSet.size() != curr.nodes().size()) { return true; } prevNodeSet.removeAll(curr.nodes()); if (!prevNodeSet.isEmpty()) { return true; } if (!prev.topics().equals(curr.topics())) { return true; } for (String topic : prev.topics()) { if (!prev.partitionCountForTopic(topic).equals(curr.partitionCountForTopic(topic))) { return true; } for (PartitionInfo prevPartInfo : prev.partitionsForTopic(topic)) { PartitionInfo currPartInfo = curr.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition())); if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) { return true; } } } return false; } private MonitorUtils(); static boolean metadataChanged(Cluster prev, Cluster curr); static ModelCompletenessRequirements combineLoadRequirementOptions(Collection<Goal> goals); static int totalNumPartitions(Cluster cluster); static String getRackHandleNull(Node node); }
MonitorUtils { public static boolean metadataChanged(Cluster prev, Cluster curr) { Set<Node> prevNodeSet = new HashSet<>(prev.nodes()); if (prevNodeSet.size() != curr.nodes().size()) { return true; } prevNodeSet.removeAll(curr.nodes()); if (!prevNodeSet.isEmpty()) { return true; } if (!prev.topics().equals(curr.topics())) { return true; } for (String topic : prev.topics()) { if (!prev.partitionCountForTopic(topic).equals(curr.partitionCountForTopic(topic))) { return true; } for (PartitionInfo prevPartInfo : prev.partitionsForTopic(topic)) { PartitionInfo currPartInfo = curr.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition())); if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) { return true; } } } return false; } private MonitorUtils(); static boolean metadataChanged(Cluster prev, Cluster curr); static ModelCompletenessRequirements combineLoadRequirementOptions(Collection<Goal> goals); static int totalNumPartitions(Cluster cluster); static String getRackHandleNull(Node node); static final double UNIT_INTERVAL_TO_PERCENTAGE; static final Map<Resource, Double> EMPTY_BROKER_CAPACITY; static final long BROKER_CAPACITY_FETCH_TIMEOUT_MS; }
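metadataChanged relies on two helpers, leaderChanged and replicaListChanged, that are referenced but not shown in these rows. The stand-ins below are hypothetical reconstructions of just the behavior the test pins down: leader identity and replica *order* both count as changes, while the in-sync replica set is ignored (cluster7 differs only in ISR and is reported as unchanged).

```java
import java.util.Arrays;

public final class PartitionChangeDemo {
    // Hypothetical stand-ins for the helpers referenced by metadataChanged(...).
    // The real implementations operate on PartitionInfo; plain ints suffice here.
    static boolean leaderChanged(int prevLeader, int currLeader) {
        return prevLeader != currLeader;
    }

    static boolean replicaListChanged(int[] prevReplicas, int[] currReplicas) {
        // Order-sensitive comparison: a reordered replica list counts as a change,
        // matching cluster4 in the test above.
        return !Arrays.equals(prevReplicas, currReplicas);
    }

    public static void main(String[] args) {
        System.out.println(replicaListChanged(new int[]{0, 1}, new int[]{1, 0})); // true
        System.out.println(leaderChanged(0, 1));                                  // true
    }
}
```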
@Test(expected = BrokerCapacityResolutionException.class) public void testParseConfigFile() throws TimeoutException, BrokerCapacityResolutionException { BrokerCapacityConfigResolver configResolver = getBrokerCapacityConfigResolver("testCapacityConfig.json", this.getClass()); assertEquals(200000.0, configResolver.capacityForBroker("", "", 0, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false) .capacity().get(Resource.NW_IN), 0.01); assertEquals(100000.0, configResolver.capacityForBroker("", "", 2, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true) .capacity().get(Resource.NW_IN), 0.01); try { configResolver.capacityForBroker("", "", BrokerCapacityConfigFileResolver.DEFAULT_CAPACITY_BROKER_ID, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false); fail("Should have thrown exception for negative broker id"); } catch (IllegalArgumentException e) { /* expected: negative broker ids are rejected */ } assertTrue(configResolver.capacityForBroker("", "", 2, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true).isEstimated()); assertTrue(configResolver.capacityForBroker("", "", 2, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true).estimationInfo().length() > 0); configResolver.capacityForBroker("", "", 2, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false); }
@Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } @Override void configure(Map<String, ?> configs); @Override BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation); @Override void close(); }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } @Override void configure(Map<String, ?> configs); @Override BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation); @Override void close(); static final String CAPACITY_CONFIG_FILE; static final int DEFAULT_CAPACITY_BROKER_ID; static final double DEFAULT_CPU_CAPACITY_WITH_CORES; }
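capacityForBroker implements a three-way decision: an exact per-broker entry wins; a missing broker falls back to the DEFAULT_CAPACITY_BROKER_ID entry only when estimation is allowed; otherwise resolution fails. A minimal sketch of that fallback with plain maps and simplified exception types (the real method throws BrokerCapacityResolutionException); the NW_IN values mirror the test above.

```java
import java.util.HashMap;
import java.util.Map;

public final class CapacityLookupDemo {
    static final int DEFAULT_CAPACITY_BROKER_ID = -1; // sentinel entry in the config file

    // Sketch of the resolver's fallback: exact broker entry wins; otherwise
    // the default entry is used only when capacity estimation is allowed.
    static Map<String, Double> capacityFor(Map<Integer, Map<String, Double>> capacities,
                                           int brokerId, boolean allowEstimation) {
        if (brokerId < 0) {
            throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative.");
        }
        Map<String, Double> capacity = capacities.get(brokerId);
        if (capacity != null) {
            return capacity;
        }
        if (allowEstimation) {
            return capacities.get(DEFAULT_CAPACITY_BROKER_ID); // estimated from default entry
        }
        throw new IllegalStateException("Unable to resolve capacity of broker " + brokerId);
    }

    public static void main(String[] args) {
        Map<Integer, Map<String, Double>> capacities = new HashMap<>();
        capacities.put(0, Map.of("NW_IN", 200000.0));
        capacities.put(DEFAULT_CAPACITY_BROKER_ID, Map.of("NW_IN", 100000.0));
        System.out.println(capacityFor(capacities, 0, false).get("NW_IN")); // 200000.0
        System.out.println(capacityFor(capacities, 2, true).get("NW_IN"));  // 100000.0 (estimated)
    }
}
```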
@Test public void testParseConfigJBODFile() throws TimeoutException, BrokerCapacityResolutionException { BrokerCapacityConfigResolver configResolver = getBrokerCapacityConfigResolver("testCapacityConfigJBOD.json", this.getClass()); assertEquals(2000000.0, configResolver.capacityForBroker("", "", 0, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false) .capacity().get(Resource.DISK), 0.01); assertEquals(2200000.0, configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true) .capacity().get(Resource.DISK), 0.01); assertEquals(200000.0, configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true) .diskCapacityByLogDir().get("/tmp/kafka-logs-4"), 0.01); assertFalse(configResolver.capacityForBroker("", "", 2, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false).isEstimated()); assertTrue(configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true).isEstimated()); assertTrue(configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true).estimationInfo().length() > 0); }
@Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } @Override void configure(Map<String, ?> configs); @Override BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation); @Override void close(); }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } @Override void configure(Map<String, ?> configs); @Override BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation); @Override void close(); static final String CAPACITY_CONFIG_FILE; static final int DEFAULT_CAPACITY_BROKER_ID; static final double DEFAULT_CPU_CAPACITY_WITH_CORES; }
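The JBOD variant additionally tracks per-log-dir disk capacities (diskCapacityByLogDir), and the test implies the broker's total DISK capacity is the sum of its log-dir capacities (200000 for /tmp/kafka-logs-4, 2200000 in total for broker 3). A sketch of that aggregation; all log-dir names other than /tmp/kafka-logs-4 are hypothetical, chosen so the sum matches.

```java
import java.util.Map;

public final class JbodCapacityDemo {
    // Sketch of the JBOD case: per-log-dir disk capacities are kept
    // individually and their sum becomes the broker's total DISK capacity.
    // The actual config parsing lives in BrokerCapacityConfigFileResolver.
    public static void main(String[] args) {
        Map<String, Double> diskCapacityByLogDir = Map.of(
            "/tmp/kafka-logs-1", 1000000.0, // hypothetical log dir
            "/tmp/kafka-logs-4", 200000.0,  // the log dir asserted in the test
            "/tmp/kafka-logs-5", 1000000.0); // hypothetical log dir
        double totalDisk = diskCapacityByLogDir.values().stream()
            .mapToDouble(Double::doubleValue).sum();
        System.out.println(totalDisk); // 2200000.0, the DISK capacity asserted for broker 3
    }
}
```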
@Test public void testParseConfigCoresFile() throws TimeoutException, BrokerCapacityResolutionException { BrokerCapacityConfigResolver configResolver = getBrokerCapacityConfigResolver("testCapacityConfigCores.json", this.getClass()); assertEquals(BrokerCapacityConfigFileResolver.DEFAULT_CPU_CAPACITY_WITH_CORES, configResolver .capacityForBroker("", "", 0, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false).capacity().get(Resource.CPU), 0.01); assertEquals(BrokerCapacityConfigFileResolver.DEFAULT_CPU_CAPACITY_WITH_CORES, configResolver .capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true).capacity().get(Resource.CPU), 0.01); assertEquals(8, configResolver.capacityForBroker("", "", 0, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false).numCpuCores()); assertEquals(64, configResolver.capacityForBroker("", "", 1, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false).numCpuCores()); assertEquals(16, configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true).numCpuCores()); assertFalse(configResolver.capacityForBroker("", "", 1, BROKER_CAPACITY_FETCH_TIMEOUT_MS, false).isEstimated()); assertTrue(configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true).isEstimated()); assertTrue(configResolver.capacityForBroker("", "", 3, BROKER_CAPACITY_FETCH_TIMEOUT_MS, true).estimationInfo().length() > 0); }
@Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } @Override void configure(Map<String, ?> configs); @Override BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation); @Override void close(); }
BrokerCapacityConfigFileResolver implements BrokerCapacityConfigResolver { @Override public BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation) throws BrokerCapacityResolutionException { if (brokerId >= 0) { BrokerCapacityInfo capacity = _capacitiesForBrokers.get(brokerId); if (capacity != null) { return capacity; } else { if (allowCapacityEstimation) { String info = String.format("Missing broker id(%d) in capacity config file.", brokerId); return new BrokerCapacityInfo(_capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).capacity(), info, _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).diskCapacityByLogDir(), _capacitiesForBrokers.get(DEFAULT_CAPACITY_BROKER_ID).numCpuCores()); } else { throw new BrokerCapacityResolutionException(String.format("Unable to resolve capacity of broker %d. Either (1) adding the " + "default broker capacity (via adding capacity for broker %d and allowing capacity estimation) or (2) adding missing " + "broker's capacity in file %s.", brokerId, DEFAULT_CAPACITY_BROKER_ID, _configFile)); } } } else { throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative."); } } @Override void configure(Map<String, ?> configs); @Override BrokerCapacityInfo capacityForBroker(String rack, String host, int brokerId, long timeoutMs, boolean allowCapacityEstimation); @Override void close(); static final String CAPACITY_CONFIG_FILE; static final int DEFAULT_CAPACITY_BROKER_ID; static final double DEFAULT_CPU_CAPACITY_WITH_CORES; }
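The row above captures the estimation fallback in capacityForBroker: a non-negative broker id missing from the config resolves to the DEFAULT_CAPACITY_BROKER_ID entry when estimation is allowed, and fails otherwise. A minimal sketch of that decision logic, using a plain Map<Integer, Double> in place of BrokerCapacityInfo (class and values here are illustrative, not from the source):

import java.util.HashMap;
import java.util.Map;

public class CapacityFallbackSketch {
    static final int DEFAULT_CAPACITY_BROKER_ID = -1; // the shared default entry

    static double resolve(Map<Integer, Double> capacities, int brokerId, boolean allowEstimation) {
        if (brokerId < 0) {
            throw new IllegalArgumentException("The broker id(" + brokerId + ") should be non-negative.");
        }
        Double capacity = capacities.get(brokerId);
        if (capacity != null) {
            return capacity; // explicitly configured broker
        }
        if (allowEstimation) {
            return capacities.get(DEFAULT_CAPACITY_BROKER_ID); // estimate from the default entry
        }
        throw new IllegalStateException("Unable to resolve capacity of broker " + brokerId);
    }

    public static void main(String[] args) {
        Map<Integer, Double> capacities = new HashMap<>();
        capacities.put(DEFAULT_CAPACITY_BROKER_ID, 100.0);
        capacities.put(0, 150.0);
        System.out.println(resolve(capacities, 0, true));  // 150.0, exact match
        System.out.println(resolve(capacities, 7, true));  // 100.0, estimated from the default
        try {
            resolve(capacities, 7, false);                 // estimation disallowed
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}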
@Test public void testRefer() { OperationProgress progress1 = new OperationProgress(); progress1.addStep(new Pending()); OperationProgress progress2 = new OperationProgress(); progress2.addStep(new WaitingForClusterModel()); assertTrue(progress1.progress().get(0) instanceof Pending); progress1.refer(progress2); assertTrue(progress1.progress().get(0) instanceof WaitingForClusterModel); assertEquals(progress1.progress(), progress2.progress()); }
public void refer(OperationProgress other) { List<OperationStep> steps; List<Long> startTimes; synchronized (other) { steps = other._steps; startTimes = other._startTimes; } synchronized (this) { ensureMutable(); this._steps = steps; this._startTimes = startTimes; this._mutable = false; } }
OperationProgress { public void refer(OperationProgress other) { List<OperationStep> steps; List<Long> startTimes; synchronized (other) { steps = other._steps; startTimes = other._startTimes; } synchronized (this) { ensureMutable(); this._steps = steps; this._startTimes = startTimes; this._mutable = false; } } }
OperationProgress { public void refer(OperationProgress other) { List<OperationStep> steps; List<Long> startTimes; synchronized (other) { steps = other._steps; startTimes = other._startTimes; } synchronized (this) { ensureMutable(); this._steps = steps; this._startTimes = startTimes; this._mutable = false; } } OperationProgress(); OperationProgress(String operation); }
OperationProgress { public void refer(OperationProgress other) { List<OperationStep> steps; List<Long> startTimes; synchronized (other) { steps = other._steps; startTimes = other._startTimes; } synchronized (this) { ensureMutable(); this._steps = steps; this._startTimes = startTimes; this._mutable = false; } } OperationProgress(); OperationProgress(String operation); synchronized void addStep(OperationStep step); void refer(OperationProgress other); synchronized List<OperationStep> progress(); synchronized void clear(); @Override synchronized String toString(); Map<String, Object> getJsonStructure(); }
OperationProgress { public void refer(OperationProgress other) { List<OperationStep> steps; List<Long> startTimes; synchronized (other) { steps = other._steps; startTimes = other._startTimes; } synchronized (this) { ensureMutable(); this._steps = steps; this._startTimes = startTimes; this._mutable = false; } } OperationProgress(); OperationProgress(String operation); synchronized void addStep(OperationStep step); void refer(OperationProgress other); synchronized List<OperationStep> progress(); synchronized void clear(); @Override synchronized String toString(); Map<String, Object> getJsonStructure(); }
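What testRefer pins down is that refer shares the other instance's backing lists by reference and then freezes the referring instance, so a placeholder progress transparently mirrors all past and future steps of the referred progress. A simplified sketch of that mechanism (String steps instead of OperationStep, start times omitted):

import java.util.ArrayList;
import java.util.List;

public class ReferSketch {
    private List<String> _steps = new ArrayList<>();
    private boolean _mutable = true;

    synchronized void addStep(String step) {
        if (!_mutable) {
            throw new IllegalStateException("This progress is immutable.");
        }
        _steps.add(step);
    }

    void refer(ReferSketch other) {
        List<String> steps;
        synchronized (other) {
            steps = other._steps; // share the reference, not a copy
        }
        synchronized (this) {
            this._steps = steps;
            this._mutable = false; // further direct writes are rejected
        }
    }

    public static void main(String[] args) {
        ReferSketch placeholder = new ReferSketch();
        placeholder.addStep("Pending");
        ReferSketch real = new ReferSketch();
        real.addStep("WaitingForClusterModel");

        placeholder.refer(real);
        real.addStep("AggregatingMetrics");
        System.out.println(placeholder._steps); // [WaitingForClusterModel, AggregatingMetrics]
        try {
            placeholder.addStep("Pending again"); // rejected: refer froze the placeholder
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}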
@Test public void testFailSignatureValidation() throws Exception { UserStore testUserStore = new UserStore(); testUserStore.addUser(TEST_USER, SecurityUtils.NO_CREDENTIAL, new String[] {"USER"}); TokenGenerator.TokenAndKeys tokenAndKeys = TokenGenerator.generateToken(TEST_USER); TokenGenerator.TokenAndKeys tokenAndKeys2 = TokenGenerator.generateToken(TEST_USER); JwtLoginService loginService = new JwtLoginService(new UserStoreAuthorizationService(testUserStore), tokenAndKeys2.publicKey(), null); SignedJWT jwtToken = SignedJWT.parse(tokenAndKeys.token()); HttpServletRequest request = mock(HttpServletRequest.class); UserIdentity identity = loginService.login(TEST_USER, jwtToken, request); assertNull(identity); }
@Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } JwtLoginService(AuthorizationService authorizationService, String publicKeyLocation, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences, Clock clock); }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } JwtLoginService(AuthorizationService authorizationService, String publicKeyLocation, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences, Clock clock); @Override String getName(); @Override UserIdentity login(String username, Object credentials, ServletRequest request); @Override boolean validate(UserIdentity user); @Override IdentityService getIdentityService(); @Override void setIdentityService(IdentityService service); @Override void logout(UserIdentity user); }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } JwtLoginService(AuthorizationService authorizationService, String publicKeyLocation, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences, Clock clock); @Override String getName(); @Override UserIdentity login(String username, Object credentials, ServletRequest request); @Override boolean validate(UserIdentity user); @Override IdentityService getIdentityService(); @Override void setIdentityService(IdentityService service); @Override void logout(UserIdentity user); static final String X_509_CERT_TYPE; }
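validateToken itself is not shown in these rows, but the wrong-key failure the test exercises comes down to an RS256 signature check against the configured public key. That check can be reproduced directly with nimbus-jose-jwt, the library behind the SignedJWT type, using freshly generated key pairs (the two pairs play the roles of tokenAndKeys and tokenAndKeys2):

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.interfaces.RSAPublicKey;
import com.nimbusds.jose.JWSAlgorithm;
import com.nimbusds.jose.JWSHeader;
import com.nimbusds.jose.crypto.RSASSASigner;
import com.nimbusds.jose.crypto.RSASSAVerifier;
import com.nimbusds.jwt.JWTClaimsSet;
import com.nimbusds.jwt.SignedJWT;

public class SignatureCheckSketch {
    public static void main(String[] args) throws Exception {
        KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
        gen.initialize(2048);
        KeyPair signingPair = gen.generateKeyPair();
        KeyPair otherPair = gen.generateKeyPair(); // the unrelated key pair

        SignedJWT jwt = new SignedJWT(new JWSHeader(JWSAlgorithm.RS256),
                                      new JWTClaimsSet.Builder().subject("testUser").build());
        jwt.sign(new RSASSASigner(signingPair.getPrivate()));

        // true with the matching key, false with the unrelated one: the testFailSignatureValidation case
        System.out.println(jwt.verify(new RSASSAVerifier((RSAPublicKey) signingPair.getPublic())));
        System.out.println(jwt.verify(new RSASSAVerifier((RSAPublicKey) otherPair.getPublic())));
    }
}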
@Test public void testFailAudienceValidation() throws Exception { UserStore testUserStore = new UserStore(); testUserStore.addUser(TEST_USER, SecurityUtils.NO_CREDENTIAL, new String[] {"USER"}); TokenGenerator.TokenAndKeys tokenAndKeys = TokenGenerator.generateToken(TEST_USER, Arrays.asList("A", "B")); JwtLoginService loginService = new JwtLoginService( new UserStoreAuthorizationService(testUserStore), tokenAndKeys.publicKey(), Arrays.asList("C", "D")); SignedJWT jwtToken = SignedJWT.parse(tokenAndKeys.token()); HttpServletRequest request = mock(HttpServletRequest.class); UserIdentity identity = loginService.login(TEST_USER, jwtToken, request); assertNull(identity); }
@Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } JwtLoginService(AuthorizationService authorizationService, String publicKeyLocation, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences, Clock clock); }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } JwtLoginService(AuthorizationService authorizationService, String publicKeyLocation, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences, Clock clock); @Override String getName(); @Override UserIdentity login(String username, Object credentials, ServletRequest request); @Override boolean validate(UserIdentity user); @Override IdentityService getIdentityService(); @Override void setIdentityService(IdentityService service); @Override void logout(UserIdentity user); }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } JwtLoginService(AuthorizationService authorizationService, String publicKeyLocation, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences, Clock clock); @Override String getName(); @Override UserIdentity login(String username, Object credentials, ServletRequest request); @Override boolean validate(UserIdentity user); @Override IdentityService getIdentityService(); @Override void setIdentityService(IdentityService service); @Override void logout(UserIdentity user); static final String X_509_CERT_TYPE; }
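For the audience case, the disjoint token audiences {A, B} and accepted audiences {C, D} make validation fail. The check plausibly reduces to a set-intersection test; a sketch under that assumption (the null-handling behavior here is also an assumption, chosen to match the surrounding tests that pass audiences == null and still succeed):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class AudienceCheckSketch {
    // Accept the token if the service requires no audience or shares at least one with the token.
    static boolean validateAudiences(List<String> tokenAudiences, List<String> acceptedAudiences) {
        if (acceptedAudiences == null) {
            return true; // service configured without an audience requirement
        }
        if (tokenAudiences == null) {
            return false;
        }
        return !Collections.disjoint(tokenAudiences, acceptedAudiences);
    }

    public static void main(String[] args) {
        System.out.println(validateAudiences(Arrays.asList("A", "B"), Arrays.asList("C", "D"))); // false, the test case
        System.out.println(validateAudiences(Arrays.asList("A", "B"), Arrays.asList("B")));      // true
        System.out.println(validateAudiences(Arrays.asList("A", "B"), null));                    // true
    }
}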
@Test public void testFailExpirationValidation() throws Exception { UserStore testUserStore = new UserStore(); testUserStore.addUser(TEST_USER, SecurityUtils.NO_CREDENTIAL, new String[] {"USER"}); TokenGenerator.TokenAndKeys tokenAndKeys = TokenGenerator.generateToken(TEST_USER, 1L); JwtLoginService loginService = new JwtLoginService(new UserStoreAuthorizationService(testUserStore), tokenAndKeys.publicKey(), null); SignedJWT jwtToken = SignedJWT.parse(tokenAndKeys.token()); HttpServletRequest request = mock(HttpServletRequest.class); UserIdentity identity = loginService.login(TEST_USER, jwtToken, request); assertNull(identity); }
@Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } JwtLoginService(AuthorizationService authorizationService, String publicKeyLocation, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences, Clock clock); }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } JwtLoginService(AuthorizationService authorizationService, String publicKeyLocation, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences, Clock clock); @Override String getName(); @Override UserIdentity login(String username, Object credentials, ServletRequest request); @Override boolean validate(UserIdentity user); @Override IdentityService getIdentityService(); @Override void setIdentityService(IdentityService service); @Override void logout(UserIdentity user); }
JwtLoginService extends AbstractLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(credentials instanceof SignedJWT)) { return null; } if (!(request instanceof HttpServletRequest)) { return null; } SignedJWT jwtToken = (SignedJWT) credentials; JWTClaimsSet claimsSet; boolean valid; try { claimsSet = jwtToken.getJWTClaimsSet(); valid = validateToken(jwtToken, claimsSet, username); } catch (ParseException e) { JWT_LOGGER.warn(String.format("%s: Couldn't parse a JWT token", username), e); return null; } if (valid) { String serializedToken = (String) request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE); UserIdentity rolesDelegate = _authorizationService.getUserIdentity((HttpServletRequest) request, username); if (rolesDelegate == null) { return null; } else { return getUserIdentity(jwtToken, claimsSet, serializedToken, username, rolesDelegate); } } else { return null; } } JwtLoginService(AuthorizationService authorizationService, String publicKeyLocation, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences); JwtLoginService(AuthorizationService authorizationService, RSAPublicKey publicKey, List<String> audiences, Clock clock); @Override String getName(); @Override UserIdentity login(String username, Object credentials, ServletRequest request); @Override boolean validate(UserIdentity user); @Override IdentityService getIdentityService(); @Override void setIdentityService(IdentityService service); @Override void logout(UserIdentity user); static final String X_509_CERT_TYPE; }
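For the expiration case, TokenGenerator.generateToken(TEST_USER, 1L) presumably issues a token whose expiry is effectively immediate, so the exp claim is already in the past by login time. The comparison can be sketched as:

import java.util.Date;

public class ExpirationCheckSketch {
    // A token is acceptable if it has no exp claim or the claim lies in the future.
    static boolean validateExpiration(Date expirationTime, Date now) {
        return expirationTime == null || expirationTime.after(now);
    }

    public static void main(String[] args) {
        Date now = new Date();
        System.out.println(validateExpiration(new Date(now.getTime() + 60_000), now)); // true, expires in a minute
        System.out.println(validateExpiration(new Date(now.getTime() - 1), now));      // false, already expired
        System.out.println(validateExpiration(null, now));                             // true, assuming exp is optional
    }
}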
@Test public void testParseTokenFromAuthHeader() { JwtAuthenticator authenticator = new JwtAuthenticator(TOKEN_PROVIDER, JWT_TOKEN); HttpServletRequest request = mock(HttpServletRequest.class); expect(request.getHeader(HttpHeader.AUTHORIZATION.asString())).andReturn(JwtAuthenticator.BEARER + " " + EXPECTED_TOKEN); replay(request); String actualToken = authenticator.getJwtFromBearerAuthorization(request); verify(request); assertEquals(EXPECTED_TOKEN, actualToken); }
String getJwtFromBearerAuthorization(HttpServletRequest req) { String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString()); if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) { return null; } else { return authorizationHeader.substring(BEARER.length()).trim(); } }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromBearerAuthorization(HttpServletRequest req) { String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString()); if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) { return null; } else { return authorizationHeader.substring(BEARER.length()).trim(); } } }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromBearerAuthorization(HttpServletRequest req) { String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString()); if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) { return null; } else { return authorizationHeader.substring(BEARER.length()).trim(); } } JwtAuthenticator(String authenticationProviderUrl, String cookieName); }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromBearerAuthorization(HttpServletRequest req) { String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString()); if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) { return null; } else { return authorizationHeader.substring(BEARER.length()).trim(); } } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromBearerAuthorization(HttpServletRequest req) { String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString()); if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) { return null; } else { return authorizationHeader.substring(BEARER.length()).trim(); } } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); static final String JWT_TOKEN_REQUEST_ATTRIBUTE; }
@Test public void testParseTokenFromAuthHeaderNoBearer() { JwtAuthenticator authenticator = new JwtAuthenticator(TOKEN_PROVIDER, JWT_TOKEN); HttpServletRequest request = mock(HttpServletRequest.class); expect(request.getHeader(HttpHeader.AUTHORIZATION.asString())).andReturn(BASIC_SCHEME + " " + EXPECTED_TOKEN); replay(request); String actualToken = authenticator.getJwtFromBearerAuthorization(request); verify(request); assertNull(actualToken); }
String getJwtFromBearerAuthorization(HttpServletRequest req) { String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString()); if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) { return null; } else { return authorizationHeader.substring(BEARER.length()).trim(); } }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromBearerAuthorization(HttpServletRequest req) { String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString()); if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) { return null; } else { return authorizationHeader.substring(BEARER.length()).trim(); } } }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromBearerAuthorization(HttpServletRequest req) { String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString()); if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) { return null; } else { return authorizationHeader.substring(BEARER.length()).trim(); } } JwtAuthenticator(String authenticationProviderUrl, String cookieName); }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromBearerAuthorization(HttpServletRequest req) { String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString()); if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) { return null; } else { return authorizationHeader.substring(BEARER.length()).trim(); } } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromBearerAuthorization(HttpServletRequest req) { String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString()); if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) { return null; } else { return authorizationHeader.substring(BEARER.length()).trim(); } } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); static final String JWT_TOKEN_REQUEST_ATTRIBUTE; }
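The two header-parsing rows above are mirror cases of the same prefix logic: strip the Bearer scheme and trim, or return null for any other scheme (the Basic scheme in the second test). A self-contained sketch without the servlet mock; note that startsWith(BEARER) matches the scheme case-sensitively, whereas HTTP auth scheme names are case-insensitive per RFC 7235, so a lowercase "bearer ..." header would be rejected here:

public class BearerParsingSketch {
    private static final String BEARER = "Bearer";

    static String getJwtFromBearerAuthorization(String authorizationHeader) {
        if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) {
            return null; // absent header or non-Bearer scheme (e.g. Basic)
        }
        return authorizationHeader.substring(BEARER.length()).trim();
    }

    public static void main(String[] args) {
        System.out.println(getJwtFromBearerAuthorization("Bearer abc.def.ghi")); // abc.def.ghi
        System.out.println(getJwtFromBearerAuthorization("Basic abc.def.ghi"));  // null, the NoBearer test
        System.out.println(getJwtFromBearerAuthorization(null));                 // null
    }
}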
@Test public void testAggregationOption1() { MetricSampleAggregator<String, IntegerEntity> aggregator = prepareCompletenessTestEnv(); AggregationOptions<String, IntegerEntity> options = new AggregationOptions<>(0.5, 1, NUM_WINDOWS, 5, new HashSet<>(Arrays.asList(ENTITY1, ENTITY2, ENTITY3)), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, IntegerEntity> completeness = aggregator.completeness(-1, Long.MAX_VALUE, options); assertTrue(completeness.validWindowIndices().isEmpty()); assertTrue(completeness.validEntities().isEmpty()); assertTrue(completeness.validEntityGroups().isEmpty()); assertCompletenessByWindowIndex(completeness); }
public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
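Before delegating to the aggregator state, completeness clamps the requested range onto the retained windows, and the still-open current window is excluded via _currentWindowIndex - 1; with from = -1 and to = Long.MAX_VALUE the test therefore covers every closed window. A sketch of the clamping arithmetic, with windowIndex(t) = t / windowMs + 1 assumed purely for illustration (the real mapping lives inside MetricSampleAggregator):

public class WindowClampSketch {
    static long windowIndex(long timeMs, long windowMs) {
        return timeMs / windowMs + 1; // assumed mapping from timestamp to window index
    }

    public static void main(String[] args) {
        long windowMs = 60_000L;
        long oldestWindowIndex = 5;
        long currentWindowIndex = 25; // the current window is still open, so it is excluded

        long from = -1, to = Long.MAX_VALUE; // the arguments used by testAggregationOption1
        long fromWindowIndex = Math.max(windowIndex(from, windowMs), oldestWindowIndex);
        long toWindowIndex = Math.min(windowIndex(to, windowMs), currentWindowIndex - 1);

        System.out.println(fromWindowIndex + " .. " + toWindowIndex); // 5 .. 24
    }
}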
@Test public void testParseTokenFromCookie() { JwtAuthenticator authenticator = new JwtAuthenticator(TOKEN_PROVIDER, JWT_TOKEN); HttpServletRequest request = mock(HttpServletRequest.class); expect(request.getCookies()).andReturn(new Cookie[] {new Cookie(JWT_TOKEN, EXPECTED_TOKEN)}); replay(request); String actualToken = authenticator.getJwtFromCookie(request); verify(request); assertEquals(EXPECTED_TOKEN, actualToken); }
String getJwtFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (_cookieName != null && _cookieName.equals(cookie.getName())) { JWT_LOGGER.trace(_cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (_cookieName != null && _cookieName.equals(cookie.getName())) { JWT_LOGGER.trace(_cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; } }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (_cookieName != null && _cookieName.equals(cookie.getName())) { JWT_LOGGER.trace(_cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (_cookieName != null && _cookieName.equals(cookie.getName())) { JWT_LOGGER.trace(_cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (_cookieName != null && _cookieName.equals(cookie.getName())) { JWT_LOGGER.trace(_cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); static final String JWT_TOKEN_REQUEST_ATTRIBUTE; }
@Test public void testParseTokenFromCookieNoJwtCookie() { JwtAuthenticator authenticator = new JwtAuthenticator(TOKEN_PROVIDER, JWT_TOKEN); HttpServletRequest request = mock(HttpServletRequest.class); expect(request.getCookies()).andReturn(new Cookie[] {new Cookie(RANDOM_COOKIE_NAME, "")}); replay(request); String actualToken = authenticator.getJwtFromCookie(request); verify(request); assertNull(actualToken); }
String getJwtFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (_cookieName != null && _cookieName.equals(cookie.getName())) { JWT_LOGGER.trace(_cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (_cookieName != null && _cookieName.equals(cookie.getName())) { JWT_LOGGER.trace(_cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; } }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (_cookieName != null && _cookieName.equals(cookie.getName())) { JWT_LOGGER.trace(_cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (_cookieName != null && _cookieName.equals(cookie.getName())) { JWT_LOGGER.trace(_cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); }
JwtAuthenticator extends LoginAuthenticator { String getJwtFromCookie(HttpServletRequest req) { String serializedJWT = null; Cookie[] cookies = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { if (_cookieName != null && _cookieName.equals(cookie.getName())) { JWT_LOGGER.trace(_cookieName + " cookie has been found and is being processed"); serializedJWT = cookie.getValue(); break; } } } return serializedJWT; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); static final String JWT_TOKEN_REQUEST_ATTRIBUTE; }
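The two cookie rows reduce to a guarded linear scan over the request's cookies for the configured name. A sketch using plain name/value pairs instead of javax.servlet Cookie objects:

import java.util.Objects;

public class CookieLookupSketch {
    static String getJwtFromCookies(String cookieName, String[][] cookies) {
        if (cookies == null) {
            return null;
        }
        for (String[] cookie : cookies) { // cookie[0] = name, cookie[1] = value
            if (cookieName != null && Objects.equals(cookieName, cookie[0])) {
                return cookie[1]; // first match wins, like the break in the original loop
            }
        }
        return null;
    }

    public static void main(String[] args) {
        String[][] withJwt = {{"JWT_TOKEN", "abc.def.ghi"}};
        String[][] withoutJwt = {{"some_random_cookie", ""}};
        System.out.println(getJwtFromCookies("JWT_TOKEN", withJwt));    // abc.def.ghi
        System.out.println(getJwtFromCookies("JWT_TOKEN", withoutJwt)); // null, the NoJwtCookie test
    }
}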
@Test public void testRedirect() throws IOException, ServerAuthException { JwtAuthenticator authenticator = new JwtAuthenticator(TOKEN_PROVIDER, JWT_TOKEN); HttpServletRequest request = mock(HttpServletRequest.class); expect(request.getMethod()).andReturn(HttpMethod.GET.asString()); expect(request.getQueryString()).andReturn(null); expect(request.getHeader(HttpHeader.AUTHORIZATION.asString())).andReturn(null); expect(request.getCookies()).andReturn(new Cookie[] {}); expect(request.getRequestURL()).andReturn(new StringBuffer(CRUISE_CONTROL_ENDPOINT)); HttpServletResponse response = mock(HttpServletResponse.class); response.sendRedirect(TOKEN_PROVIDER.replace(JwtAuthenticator.REDIRECT_URL, CRUISE_CONTROL_ENDPOINT)); expectLastCall().andVoid(); replay(request, response); Authentication actualAuthentication = authenticator.validateRequest(request, response, true); verify(request, response); assertEquals(Authentication.SEND_CONTINUE, actualAuthentication); }
@Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); static final String JWT_TOKEN_REQUEST_ATTRIBUTE; }
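testRedirect shows the unauthenticated path end to end: no Authorization header and no cookie, so validateRequest asks _authenticationProviderUrlGenerator for a login URL and answers SEND_CONTINUE after issuing the redirect. The test's TOKEN_PROVIDER.replace(JwtAuthenticator.REDIRECT_URL, ...) call suggests the generator substitutes the original request URL into a provider template; a sketch with the placeholder token and template URL invented for illustration:

import java.util.function.Function;

public class RedirectUrlSketch {
    static final String REDIRECT_URL = "{redirectUrl}"; // hypothetical placeholder token

    public static void main(String[] args) {
        String providerTemplate = "https://auth.example.com/login?origin=" + REDIRECT_URL; // hypothetical provider
        Function<String, String> loginUrlGenerator =
            requestUrl -> providerTemplate.replace(REDIRECT_URL, requestUrl);

        // No JWT found in header or cookie -> redirect the browser to this URL and return SEND_CONTINUE.
        System.out.println(loginUrlGenerator.apply("http://localhost:9090/kafkacruisecontrol/state"));
    }
}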
@Test public void testSuccessfulLogin() throws Exception { UserStore testUserStore = new UserStore(); testUserStore.addUser(TEST_USER, SecurityUtils.NO_CREDENTIAL, new String[]{USER_ROLE}); TokenGenerator.TokenAndKeys tokenAndKeys = TokenGenerator.generateToken(TEST_USER); JwtLoginService loginService = new JwtLoginService(new UserStoreAuthorizationService(testUserStore), tokenAndKeys.publicKey(), null); Authenticator.AuthConfiguration configuration = mock(Authenticator.AuthConfiguration.class); expect(configuration.getLoginService()).andReturn(loginService); expect(configuration.getIdentityService()).andReturn(new DefaultIdentityService()); expect(configuration.isSessionRenewedOnAuthentication()).andReturn(true); Request request = niceMock(Request.class); expect(request.getMethod()).andReturn(HttpMethod.GET.asString()); expect(request.getHeader(HttpHeader.AUTHORIZATION.asString())).andReturn(null); request.setAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE, tokenAndKeys.token()); expectLastCall().andVoid(); expect(request.getCookies()).andReturn(new Cookie[] {new Cookie(JWT_TOKEN, tokenAndKeys.token())}); expect(request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE)).andReturn(tokenAndKeys.token()); HttpServletResponse response = mock(HttpServletResponse.class); replay(configuration, request, response); JwtAuthenticator authenticator = new JwtAuthenticator(TOKEN_PROVIDER, JWT_TOKEN); authenticator.setConfiguration(configuration); UserAuthentication authentication = (UserAuthentication) authenticator.validateRequest(request, response, true); verify(configuration, request, response); assertNotNull(authentication); assertTrue(authentication.getUserIdentity().getUserPrincipal() instanceof JwtUserPrincipal); JwtUserPrincipal userPrincipal = (JwtUserPrincipal) authentication.getUserIdentity().getUserPrincipal(); assertEquals(TEST_USER, userPrincipal.getName()); assertEquals(tokenAndKeys.token(), userPrincipal.getSerializedToken()); }
@Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); static final String JWT_TOKEN_REQUEST_ATTRIBUTE; }
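testSuccessfulLogin above feeds the authenticator a token produced by TokenGenerator, whose implementation is not part of this section. The sketch below shows one way to mint an equivalent RS256 token with nimbus-jose-jwt, the library behind the SignedJWT type used in validateRequest. The class name, key handling, and subject value are illustrative assumptions (the value of TEST_USER is not shown here), not TokenGenerator's actual code:

import com.nimbusds.jose.JWSAlgorithm;
import com.nimbusds.jose.JWSHeader;
import com.nimbusds.jose.crypto.RSASSASigner;
import com.nimbusds.jwt.JWTClaimsSet;
import com.nimbusds.jwt.SignedJWT;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.interfaces.RSAPrivateKey;

public final class TokenMintingSketch {
  public static void main(String[] args) throws Exception {
    // Throwaway RSA key pair; TokenGenerator presumably exposes the public half
    // so that JwtLoginService can verify the signature.
    KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
    generator.initialize(2048);
    KeyPair keyPair = generator.generateKeyPair();

    // validateRequest only reads the subject claim, so that is all we set.
    JWTClaimsSet claims = new JWTClaimsSet.Builder().subject("ccTestUser").build();
    SignedJWT jwt = new SignedJWT(new JWSHeader(JWSAlgorithm.RS256), claims);
    jwt.sign(new RSASSASigner((RSAPrivateKey) keyPair.getPrivate()));

    // The serialized form is what the test stores in the JWT_TOKEN cookie.
    System.out.println(jwt.serialize());
  }
}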
@Test public void testFailedLoginWithUserNotFound() throws Exception { UserStore testUserStore = new UserStore(); testUserStore.addUser(TEST_USER_2, SecurityUtils.NO_CREDENTIAL, new String[] {USER_ROLE}); TokenGenerator.TokenAndKeys tokenAndKeys = TokenGenerator.generateToken(TEST_USER); JwtLoginService loginService = new JwtLoginService(new UserStoreAuthorizationService(testUserStore), tokenAndKeys.publicKey(), null); Authenticator.AuthConfiguration configuration = mock(Authenticator.AuthConfiguration.class); expect(configuration.getLoginService()).andReturn(loginService); expect(configuration.getIdentityService()).andReturn(new DefaultIdentityService()); expect(configuration.isSessionRenewedOnAuthentication()).andReturn(true); Request request = niceMock(Request.class); expect(request.getMethod()).andReturn(HttpMethod.GET.asString()); expect(request.getHeader(HttpHeader.AUTHORIZATION.asString())).andReturn(null); request.setAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE, tokenAndKeys.token()); expectLastCall().andVoid(); expect(request.getCookies()).andReturn(new Cookie[] {new Cookie(JWT_TOKEN, tokenAndKeys.token())}); expect(request.getAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE)).andReturn(tokenAndKeys.token()); HttpServletResponse response = mock(HttpServletResponse.class); response.setStatus(HttpStatus.UNAUTHORIZED_401); expectLastCall().andVoid(); replay(configuration, request, response); JwtAuthenticator authenticator = new JwtAuthenticator(TOKEN_PROVIDER, JWT_TOKEN); authenticator.setConfiguration(configuration); Authentication authentication = authenticator.validateRequest(request, response, true); verify(configuration, request, response); assertNotNull(authentication); assertEquals(Authentication.SEND_FAILURE, authentication); }
@Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); static final String JWT_TOKEN_REQUEST_ATTRIBUTE; }
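testFailedLoginWithUserNotFound registers TEST_USER_2 in the store but presents a token whose subject is TEST_USER, so the lookup inside login comes back empty and validateRequest answers with a 401. A minimal sketch of that lookup against Jetty's UserStore, with illustrative user names (SecurityUtils.NO_CREDENTIAL is the constant the tests above already use):

import org.eclipse.jetty.security.UserStore;
import org.eclipse.jetty.server.UserIdentity;

// Jetty's UserStore backs the UserStoreAuthorizationService used by the test.
UserStore store = new UserStore();
store.addUser("someOtherUser", SecurityUtils.NO_CREDENTIAL, new String[] {"USER"});

// An unknown name yields null, which validateRequest maps to
// HttpStatus.UNAUTHORIZED_401 and Authentication.SEND_FAILURE.
UserIdentity identity = store.getUserIdentity("ccTestUser"); // null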
@Test public void testFailedLoginWithInvalidToken() throws Exception { UserStore testUserStore = new UserStore(); testUserStore.addUser(TEST_USER_2, SecurityUtils.NO_CREDENTIAL, new String[] {USER_ROLE}); TokenGenerator.TokenAndKeys tokenAndKeys = TokenGenerator.generateToken(TEST_USER); TokenGenerator.TokenAndKeys tokenAndKeys2 = TokenGenerator.generateToken(TEST_USER); JwtLoginService loginService = new JwtLoginService(new UserStoreAuthorizationService(testUserStore), tokenAndKeys.publicKey(), null); Authenticator.AuthConfiguration configuration = mock(Authenticator.AuthConfiguration.class); expect(configuration.getLoginService()).andReturn(loginService); expect(configuration.getIdentityService()).andReturn(new DefaultIdentityService()); expect(configuration.isSessionRenewedOnAuthentication()).andReturn(true); Request request = niceMock(Request.class); expect(request.getMethod()).andReturn(HttpMethod.GET.asString()); expect(request.getHeader(HttpHeader.AUTHORIZATION.asString())).andReturn(null); request.setAttribute(JwtAuthenticator.JWT_TOKEN_REQUEST_ATTRIBUTE, tokenAndKeys2.token()); expectLastCall().andVoid(); expect(request.getCookies()).andReturn(new Cookie[] {new Cookie(JWT_TOKEN, tokenAndKeys2.token())}); HttpServletResponse response = mock(HttpServletResponse.class); response.setStatus(HttpStatus.UNAUTHORIZED_401); expectLastCall().andVoid(); replay(configuration, request, response); JwtAuthenticator authenticator = new JwtAuthenticator(TOKEN_PROVIDER, JWT_TOKEN); authenticator.setConfiguration(configuration); Authentication authentication = authenticator.validateRequest(request, response, true); verify(configuration, request, response); assertNotNull(authentication); assertEquals(Authentication.SEND_FAILURE, authentication); }
@Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); }
JwtAuthenticator extends LoginAuthenticator { @Override public Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory) throws ServerAuthException { JWT_LOGGER.trace("Authentication request received for " + request.toString()); if (!(request instanceof HttpServletRequest) && !(response instanceof HttpServletResponse)) { return Authentication.UNAUTHENTICATED; } String serializedJWT; HttpServletRequest req = (HttpServletRequest) request; if (HttpMethod.OPTIONS.name().toLowerCase().equals(req.getMethod().toLowerCase())) { return Authentication.NOT_CHECKED; } serializedJWT = getJwtFromBearerAuthorization(req); if (serializedJWT == null) { serializedJWT = getJwtFromCookie(req); } if (serializedJWT == null) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.info("No JWT token found, sending redirect to " + loginURL); try { ((HttpServletResponse) response).sendRedirect(loginURL); return Authentication.SEND_CONTINUE; } catch (IOException e) { JWT_LOGGER.error("Couldn't authenticate request", e); throw new ServerAuthException(e); } } else { try { SignedJWT jwtToken = SignedJWT.parse(serializedJWT); String userName = jwtToken.getJWTClaimsSet().getSubject(); request.setAttribute(JWT_TOKEN_REQUEST_ATTRIBUTE, serializedJWT); UserIdentity identity = login(userName, jwtToken, request); if (identity == null) { ((HttpServletResponse) response).setStatus(HttpStatus.UNAUTHORIZED_401); return Authentication.SEND_FAILURE; } else { return new UserAuthentication(getAuthMethod(), identity); } } catch (ParseException pe) { String loginURL = _authenticationProviderUrlGenerator.apply(req); JWT_LOGGER.warn("Unable to parse the JWT token, redirecting back to the login page", pe); try { ((HttpServletResponse) response).sendRedirect(loginURL); } catch (IOException e) { throw new ServerAuthException(e); } } } return Authentication.SEND_FAILURE; } JwtAuthenticator(String authenticationProviderUrl, String cookieName); @Override String getAuthMethod(); @Override void prepareRequest(ServletRequest request); @Override Authentication validateRequest(ServletRequest request, ServletResponse response, boolean mandatory); @Override boolean secureResponse(ServletRequest request, ServletResponse response, boolean mandatory, Authentication.User validatedUser); static final String JWT_TOKEN_REQUEST_ATTRIBUTE; }
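testFailedLoginWithInvalidToken signs the presented token with tokenAndKeys2 while the login service validates against tokenAndKeys.publicKey(), so the signature check fails and login returns no identity. The helper below is a hypothetical sketch of that check in isolation with nimbus-jose-jwt; the method name and parameters are illustrative:

import com.nimbusds.jose.crypto.RSASSAVerifier;
import com.nimbusds.jwt.SignedJWT;
import java.security.interfaces.RSAPublicKey;

// serialized: the token minted with the *other* key pair (tokenAndKeys2);
// expectedKey: the RSAPublicKey the JwtLoginService was constructed with.
static boolean verifies(String serialized, RSAPublicKey expectedKey) throws Exception {
  return SignedJWT.parse(serialized).verify(new RSASSAVerifier(expectedKey));
}
// Returns false for the mismatched key, which is what drives the 401 here.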
@Test public void testSuccessfulLoginWithIpFiltering() throws Exception { TrustedProxyAuthorizationService srv = new TrustedProxyAuthorizationService(Collections.singletonList(AUTH_SERVICE_NAME), IP_FILTER); HttpServletRequest mockRequest = mock(HttpServletRequest.class); expect(mockRequest.getRemoteAddr()).andReturn("192.168.0.1"); replay(mockRequest); srv.start(); try { UserIdentity result = srv.getUserIdentity(mockRequest, AUTH_SERVICE_NAME); assertNotNull(result); assertEquals(AUTH_SERVICE_NAME, result.getUserPrincipal().getName()); } finally { srv.stop(); } }
@Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } TrustedProxyAuthorizationService(List<String> userNames, String trustedProxyIpPattern); }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } TrustedProxyAuthorizationService(List<String> userNames, String trustedProxyIpPattern); @Override UserIdentity getUserIdentity(HttpServletRequest request, String name); }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } TrustedProxyAuthorizationService(List<String> userNames, String trustedProxyIpPattern); @Override UserIdentity getUserIdentity(HttpServletRequest request, String name); }
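The IP_FILTER constant used by these TrustedProxyAuthorizationService tests is defined outside this section. Given that 192.168.0.1 passes here and 192.167.0.1 is rejected in the next test, a pattern of the following shape is consistent with the observed behavior; the value below is a hypothetical stand-in, not the actual constant:

import java.util.regex.Pattern;

// Hypothetical filter consistent with the two test addresses.
Pattern trustedProxyIpPattern = Pattern.compile("192\\.168\\.0\\.\\d{1,3}");
trustedProxyIpPattern.matcher("192.168.0.1").matches(); // true  -> identity returned
trustedProxyIpPattern.matcher("192.167.0.1").matches(); // false -> null, login rejected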
@Test public void testUnsuccessfulLoginWithIpFiltering() throws Exception { TrustedProxyAuthorizationService srv = new TrustedProxyAuthorizationService(Collections.singletonList(AUTH_SERVICE_NAME), IP_FILTER); HttpServletRequest mockRequest = mock(HttpServletRequest.class); expect(mockRequest.getRemoteAddr()).andReturn("192.167.0.1"); replay(mockRequest); srv.start(); try { UserIdentity result = srv.getUserIdentity(mockRequest, AUTH_SERVICE_NAME); assertNull(result); } finally { srv.stop(); } }
@Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } TrustedProxyAuthorizationService(List<String> userNames, String trustedProxyIpPattern); }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } TrustedProxyAuthorizationService(List<String> userNames, String trustedProxyIpPattern); @Override UserIdentity getUserIdentity(HttpServletRequest request, String name); }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } TrustedProxyAuthorizationService(List<String> userNames, String trustedProxyIpPattern); @Override UserIdentity getUserIdentity(HttpServletRequest request, String name); }
@Test public void testSuccessfulLoginWithoutIpFiltering() throws Exception { TrustedProxyAuthorizationService srv = new TrustedProxyAuthorizationService(Collections.singletonList(AUTH_SERVICE_NAME), null); HttpServletRequest mockRequest = mock(HttpServletRequest.class); srv.start(); try { UserIdentity result = srv.getUserIdentity(mockRequest, AUTH_SERVICE_NAME); assertNotNull(result); assertEquals(AUTH_SERVICE_NAME, result.getUserPrincipal().getName()); } finally { srv.stop(); } }
@Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } TrustedProxyAuthorizationService(List<String> userNames, String trustedProxyIpPattern); }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } TrustedProxyAuthorizationService(List<String> userNames, String trustedProxyIpPattern); @Override UserIdentity getUserIdentity(HttpServletRequest request, String name); }
TrustedProxyAuthorizationService extends AbstractLifeCycle implements AuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int nameHostSeparatorIndex = name.indexOf('/'); String serviceName = nameHostSeparatorIndex > 0 ? name.substring(0, nameHostSeparatorIndex) : name; UserIdentity serviceIdentity = _adminUserStore.getUserIdentity(serviceName); if (_trustedProxyIpPattern != null) { return _trustedProxyIpPattern.matcher(request.getRemoteAddr()).matches() ? serviceIdentity : null; } else { return serviceIdentity; } } TrustedProxyAuthorizationService(List<String> userNames, String trustedProxyIpPattern); @Override UserIdentity getUserIdentity(HttpServletRequest request, String name); }
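getUserIdentity above also accepts Kerberos-style principal names and strips the host part before the store lookup. A worked example of that first line, with an illustrative principal:

// "service/host" collapses to "service"; a bare name passes through unchanged.
String name = "ccTestService/host.example.com";
int sep = name.indexOf('/');
String serviceName = sep > 0 ? name.substring(0, sep) : name; // "ccTestService"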
@Test public void testInvalidAuthServiceUser() { ConfigurableSpnegoLoginService mockSpnegoLoginService = mock(ConfigurableSpnegoLoginService.class); SpnegoUserPrincipal servicePrincipal = new SpnegoUserPrincipal(TEST_SERVICE_USER, ENCODED_TOKEN); Subject subject = new Subject(true, Collections.singleton(servicePrincipal), Collections.emptySet(), Collections.emptySet()); SpnegoUserIdentity result = new SpnegoUserIdentity(subject, servicePrincipal, null); expect(mockSpnegoLoginService.login(anyString(), anyObject(), anyObject())).andReturn(result); TestAuthorizer userAuthorizer = new TestAuthorizer(TEST_USER); HttpServletRequest mockRequest = mock(HttpServletRequest.class); expect(mockRequest.getParameter(DO_AS)).andReturn(TEST_USER); replay(mockSpnegoLoginService, mockRequest); TrustedProxyLoginService trustedProxyLoginService = new TrustedProxyLoginService(mockSpnegoLoginService, userAuthorizer); UserIdentity doAsIdentity = trustedProxyLoginService.login(null, ENCODED_TOKEN, mockRequest); assertNotNull(doAsIdentity); assertFalse(((SpnegoUserIdentity) doAsIdentity).isEstablished()); }
@Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(request instanceof HttpServletRequest)) { return null; } SpnegoUserIdentity serviceIdentity = (SpnegoUserIdentity) _delegateSpnegoLoginService.login(username, credentials, request); SpnegoUserPrincipal servicePrincipal = (SpnegoUserPrincipal) serviceIdentity.getUserPrincipal(); String doAsUser = request.getParameter(DO_AS); LOG.info("Authorizing proxy user {} from {} service", doAsUser, servicePrincipal.getName()); UserIdentity doAsIdentity = null; if (doAsUser != null && !doAsUser.isEmpty()) { doAsIdentity = _endUserAuthorizer.getUserIdentity((HttpServletRequest) request, doAsUser); } Principal principal = new TrustedProxyPrincipal(doAsUser, servicePrincipal); Subject subject = new Subject(READ_ONLY_SUBJECT, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet()); if (!serviceIdentity.isEstablished()) { LOG.info("Service user {} isn't authorized as a trusted proxy", servicePrincipal.getName()); return new SpnegoUserIdentity(subject, principal, null); } else { if (doAsIdentity == null) { LOG.info("Couldn't authorize user {}", doAsUser); } return new SpnegoUserIdentity(subject, principal, doAsIdentity); } }
TrustedProxyLoginService extends ContainerLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(request instanceof HttpServletRequest)) { return null; } SpnegoUserIdentity serviceIdentity = (SpnegoUserIdentity) _delegateSpnegoLoginService.login(username, credentials, request); SpnegoUserPrincipal servicePrincipal = (SpnegoUserPrincipal) serviceIdentity.getUserPrincipal(); String doAsUser = request.getParameter(DO_AS); LOG.info("Authorizing proxy user {} from {} service", doAsUser, servicePrincipal.getName()); UserIdentity doAsIdentity = null; if (doAsUser != null && !doAsUser.isEmpty()) { doAsIdentity = _endUserAuthorizer.getUserIdentity((HttpServletRequest) request, doAsUser); } Principal principal = new TrustedProxyPrincipal(doAsUser, servicePrincipal); Subject subject = new Subject(READ_ONLY_SUBJECT, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet()); if (!serviceIdentity.isEstablished()) { LOG.info("Service user {} isn't authorized as a trusted proxy", servicePrincipal.getName()); return new SpnegoUserIdentity(subject, principal, null); } else { if (doAsIdentity == null) { LOG.info("Couldn't authorize user {}", doAsUser); } return new SpnegoUserIdentity(subject, principal, doAsIdentity); } } }
TrustedProxyLoginService extends ContainerLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(request instanceof HttpServletRequest)) { return null; } SpnegoUserIdentity serviceIdentity = (SpnegoUserIdentity) _delegateSpnegoLoginService.login(username, credentials, request); SpnegoUserPrincipal servicePrincipal = (SpnegoUserPrincipal) serviceIdentity.getUserPrincipal(); String doAsUser = request.getParameter(DO_AS); LOG.info("Authorizing proxy user {} from {} service", doAsUser, servicePrincipal.getName()); UserIdentity doAsIdentity = null; if (doAsUser != null && !doAsUser.isEmpty()) { doAsIdentity = _endUserAuthorizer.getUserIdentity((HttpServletRequest) request, doAsUser); } Principal principal = new TrustedProxyPrincipal(doAsUser, servicePrincipal); Subject subject = new Subject(READ_ONLY_SUBJECT, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet()); if (!serviceIdentity.isEstablished()) { LOG.info("Service user {} isn't authorized as a trusted proxy", servicePrincipal.getName()); return new SpnegoUserIdentity(subject, principal, null); } else { if (doAsIdentity == null) { LOG.info("Couldn't authorize user {}", doAsUser); } return new SpnegoUserIdentity(subject, principal, doAsIdentity); } } TrustedProxyLoginService(String realm, AuthorizationService userAuthorizer, List<String> trustedProxies, String trustedProxyIpPattern); TrustedProxyLoginService(ConfigurableSpnegoLoginService delegateSpnegoLoginService, AuthorizationService userAuthorizer); }
TrustedProxyLoginService extends ContainerLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(request instanceof HttpServletRequest)) { return null; } SpnegoUserIdentity serviceIdentity = (SpnegoUserIdentity) _delegateSpnegoLoginService.login(username, credentials, request); SpnegoUserPrincipal servicePrincipal = (SpnegoUserPrincipal) serviceIdentity.getUserPrincipal(); String doAsUser = request.getParameter(DO_AS); LOG.info("Authorizing proxy user {} from {} service", doAsUser, servicePrincipal.getName()); UserIdentity doAsIdentity = null; if (doAsUser != null && !doAsUser.isEmpty()) { doAsIdentity = _endUserAuthorizer.getUserIdentity((HttpServletRequest) request, doAsUser); } Principal principal = new TrustedProxyPrincipal(doAsUser, servicePrincipal); Subject subject = new Subject(READ_ONLY_SUBJECT, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet()); if (!serviceIdentity.isEstablished()) { LOG.info("Service user {} isn't authorized as a trusted proxy", servicePrincipal.getName()); return new SpnegoUserIdentity(subject, principal, null); } else { if (doAsIdentity == null) { LOG.info("Couldn't authorize user {}", doAsUser); } return new SpnegoUserIdentity(subject, principal, doAsIdentity); } } TrustedProxyLoginService(String realm, AuthorizationService userAuthorizer, List<String> trustedProxies, String trustedProxyIpPattern); TrustedProxyLoginService(ConfigurableSpnegoLoginService delegateSpnegoLoginService, AuthorizationService userAuthorizer); void setServiceName(String serviceName); void setHostName(String hostName); void setKeyTabPath(Path path); @Override String getName(); @Override UserIdentity login(String username, Object credentials, ServletRequest request); @Override boolean validate(UserIdentity user); @Override IdentityService getIdentityService(); @Override void setIdentityService(IdentityService service); @Override void logout(UserIdentity user); }
TrustedProxyLoginService extends ContainerLifeCycle implements LoginService { @Override public UserIdentity login(String username, Object credentials, ServletRequest request) { if (!(request instanceof HttpServletRequest)) { return null; } SpnegoUserIdentity serviceIdentity = (SpnegoUserIdentity) _delegateSpnegoLoginService.login(username, credentials, request); SpnegoUserPrincipal servicePrincipal = (SpnegoUserPrincipal) serviceIdentity.getUserPrincipal(); String doAsUser = request.getParameter(DO_AS); LOG.info("Authorizing proxy user {} from {} service", doAsUser, servicePrincipal.getName()); UserIdentity doAsIdentity = null; if (doAsUser != null && !doAsUser.isEmpty()) { doAsIdentity = _endUserAuthorizer.getUserIdentity((HttpServletRequest) request, doAsUser); } Principal principal = new TrustedProxyPrincipal(doAsUser, servicePrincipal); Subject subject = new Subject(READ_ONLY_SUBJECT, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet()); if (!serviceIdentity.isEstablished()) { LOG.info("Service user {} isn't authorized as a trusted proxy", servicePrincipal.getName()); return new SpnegoUserIdentity(subject, principal, null); } else { if (doAsIdentity == null) { LOG.info("Couldn't authorize user {}", doAsUser); } return new SpnegoUserIdentity(subject, principal, doAsIdentity); } } TrustedProxyLoginService(String realm, AuthorizationService userAuthorizer, List<String> trustedProxies, String trustedProxyIpPattern); TrustedProxyLoginService(ConfigurableSpnegoLoginService delegateSpnegoLoginService, AuthorizationService userAuthorizer); void setServiceName(String serviceName); void setHostName(String hostName); void setKeyTabPath(Path path); @Override String getName(); @Override UserIdentity login(String username, Object credentials, ServletRequest request); @Override boolean validate(UserIdentity user); @Override IdentityService getIdentityService(); @Override void setIdentityService(IdentityService service); @Override void logout(UserIdentity user); static final boolean READ_ONLY_SUBJECT; }
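TrustedProxyLoginService above combines two identities: the SPNEGO-authenticated proxy service and the end user named by the DO_AS request parameter. A request in this flow would look roughly like the sketch below; only the doAs parameter (assuming DO_AS resolves to that conventional name) and the Negotiate scheme follow from the code and standard SPNEGO usage, while the path and host are illustrative:

// Illustrative wire shape of a trusted-proxy call (not taken from the document):
// GET /some/endpoint?doAs=ccTestUser HTTP/1.1
// Host: cruise-control.example.com
// Authorization: Negotiate <base64 SPNEGO token of the proxy service principal>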
@Test public void testAggregationOption2() { MetricSampleAggregator<String, IntegerEntity> aggregator = prepareCompletenessTestEnv(); AggregationOptions<String, IntegerEntity> options = new AggregationOptions<>(0.5, 0.0, NUM_WINDOWS, 5, new HashSet<>(Arrays.asList(ENTITY1, ENTITY2, ENTITY3)), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, IntegerEntity> completeness = aggregator.completeness(-1, Long.MAX_VALUE, options); assertEquals(17, completeness.validWindowIndices().size()); assertFalse(completeness.validWindowIndices().contains(3L)); assertFalse(completeness.validWindowIndices().contains(4L)); assertFalse(completeness.validWindowIndices().contains(20L)); assertEquals(2, completeness.validEntities().size()); assertTrue(completeness.validEntities().contains(ENTITY1)); assertTrue(completeness.validEntities().contains(ENTITY3)); assertEquals(1, completeness.validEntityGroups().size()); assertTrue(completeness.validEntityGroups().contains(ENTITY3.group())); assertCompletenessByWindowIndex(completeness); }
public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
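completeness() first translates the requested timestamps into window indices and clamps them to the retained range [oldest, current - 1]; only when the clamped range still overlaps the retained windows does it delegate to the aggregator state. A minimal JDK-only sketch of that clamping, operating directly on indices instead of timestamps (class and method names are hypothetical):

public class WindowClampSketch {
    // Returns the clamped [from, to] index pair, or an empty array when the request
    // misses the retained windows entirely (analogous to the empty MetricSampleCompleteness).
    static long[] clamp(long fromIndex, long toIndex, long oldestIndex, long currentIndex) {
        long from = Math.max(fromIndex, oldestIndex);
        long to = Math.min(toIndex, currentIndex - 1);
        if (from > currentIndex || to < oldestIndex) {
            return new long[0];
        }
        return new long[] {from, to};
    }

    public static void main(String[] args) {
        // Suppose windows 5..19 are retained and the in-progress window is 20:
        System.out.println(java.util.Arrays.toString(clamp(0, Long.MAX_VALUE, 5, 20))); // [5, 19]
        System.out.println(java.util.Arrays.toString(clamp(30, 40, 5, 20)));            // []
    }
}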
@Test public void testPrincipalNames() { UserStore users = new UserStore(); users.addUser(TEST_USER, SecurityUtils.NO_CREDENTIAL, new String[] { DefaultRoleSecurityProvider.ADMIN }); UserStoreAuthorizationService usas = new SpnegoUserStoreAuthorizationService(users); UserIdentity result = usas.getUserIdentity(null, TEST_USER + "/host@REALM"); assertNotNull(result); assertEquals(TEST_USER, result.getUserPrincipal().getName()); result = usas.getUserIdentity(null, TEST_USER + "@REALM"); assertNotNull(result); assertEquals(TEST_USER, result.getUserPrincipal().getName()); result = usas.getUserIdentity(null, TEST_USER + "/host"); assertNotNull(result); assertEquals(TEST_USER, result.getUserPrincipal().getName()); result = usas.getUserIdentity(null, TEST_USER); assertNotNull(result); assertEquals(TEST_USER, result.getUserPrincipal().getName()); }
@Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int hostSeparator = name.indexOf('/'); String shortName = hostSeparator > 0 ? name.substring(0, hostSeparator) : name; int realmSeparator = shortName.indexOf('@'); shortName = realmSeparator > 0 ? shortName.substring(0, realmSeparator) : shortName; return super.getUserIdentity(request, shortName); }
SpnegoUserStoreAuthorizationService extends UserStoreAuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int hostSeparator = name.indexOf('/'); String shortName = hostSeparator > 0 ? name.substring(0, hostSeparator) : name; int realmSeparator = shortName.indexOf('@'); shortName = realmSeparator > 0 ? shortName.substring(0, realmSeparator) : shortName; return super.getUserIdentity(request, shortName); } }
SpnegoUserStoreAuthorizationService extends UserStoreAuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int hostSeparator = name.indexOf('/'); String shortName = hostSeparator > 0 ? name.substring(0, hostSeparator) : name; int realmSeparator = shortName.indexOf('@'); shortName = realmSeparator > 0 ? shortName.substring(0, realmSeparator) : shortName; return super.getUserIdentity(request, shortName); } SpnegoUserStoreAuthorizationService(String privilegesFilePath); SpnegoUserStoreAuthorizationService(UserStore userStore); }
SpnegoUserStoreAuthorizationService extends UserStoreAuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int hostSeparator = name.indexOf('/'); String shortName = hostSeparator > 0 ? name.substring(0, hostSeparator) : name; int realmSeparator = shortName.indexOf('@'); shortName = realmSeparator > 0 ? shortName.substring(0, realmSeparator) : shortName; return super.getUserIdentity(request, shortName); } SpnegoUserStoreAuthorizationService(String privilegesFilePath); SpnegoUserStoreAuthorizationService(UserStore userStore); @Override UserIdentity getUserIdentity(HttpServletRequest request, String name); }
SpnegoUserStoreAuthorizationService extends UserStoreAuthorizationService { @Override public UserIdentity getUserIdentity(HttpServletRequest request, String name) { int hostSeparator = name.indexOf('/'); String shortName = hostSeparator > 0 ? name.substring(0, hostSeparator) : name; int realmSeparator = shortName.indexOf('@'); shortName = realmSeparator > 0 ? shortName.substring(0, realmSeparator) : shortName; return super.getUserIdentity(request, shortName); } SpnegoUserStoreAuthorizationService(String privilegesFilePath); SpnegoUserStoreAuthorizationService(UserStore userStore); @Override UserIdentity getUserIdentity(HttpServletRequest request, String name); }
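getUserIdentity() normalizes a Kerberos principal to its short name before consulting the user store, which is exactly what testPrincipalNames pins down: "user/host@REALM", "user@REALM", "user/host", and "user" all resolve to the same entry. A self-contained, JDK-only sketch of that normalization (the class name is hypothetical):

public class ShortNameSketch {
    static String shortName(String principal) {
        int hostSeparator = principal.indexOf('/');
        String shortName = hostSeparator > 0 ? principal.substring(0, hostSeparator) : principal;
        int realmSeparator = shortName.indexOf('@');
        return realmSeparator > 0 ? shortName.substring(0, realmSeparator) : shortName;
    }

    public static void main(String[] args) {
        System.out.println(shortName("ccTestUser/host@REALM")); // ccTestUser
        System.out.println(shortName("ccTestUser@REALM"));      // ccTestUser
        System.out.println(shortName("ccTestUser/host"));       // ccTestUser
        System.out.println(shortName("ccTestUser"));            // ccTestUser
    }
}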
@Test public void testSetExecutionThread() { OperationFuture future = new OperationFuture("testSetExecutionThread"); assertTrue(future.setExecutionThread(new Thread())); future.cancel(true); assertTrue("Should be able to set the execution thread of canceled future to null", future.setExecutionThread(null)); assertFalse("Should failed to set execution thread for the canceled future.", future.setExecutionThread(new Thread())); }
public synchronized boolean setExecutionThread(Thread t) { if (isCancelled() && t != null) { return false; } else { _executionThread = t; return true; } }
OperationFuture extends CompletableFuture<CruiseControlResponse> { public synchronized boolean setExecutionThread(Thread t) { if (isCancelled() && t != null) { return false; } else { _executionThread = t; return true; } } }
OperationFuture extends CompletableFuture<CruiseControlResponse> { public synchronized boolean setExecutionThread(Thread t) { if (isCancelled() && t != null) { return false; } else { _executionThread = t; return true; } } OperationFuture(String operation); }
OperationFuture extends CompletableFuture<CruiseControlResponse> { public synchronized boolean setExecutionThread(Thread t) { if (isCancelled() && t != null) { return false; } else { _executionThread = t; return true; } } OperationFuture(String operation); @Override synchronized boolean cancel(boolean mayInterruptIfRunning); @Override CruiseControlResponse get(); String operation(); synchronized boolean setExecutionThread(Thread t); String progressString(); Map<String, Object> getJsonStructure(); OperationProgress operationProgress(); void setFinishTimeNs(long finishTimeNs); long finishTimeNs(); }
OperationFuture extends CompletableFuture<CruiseControlResponse> { public synchronized boolean setExecutionThread(Thread t) { if (isCancelled() && t != null) { return false; } else { _executionThread = t; return true; } } OperationFuture(String operation); @Override synchronized boolean cancel(boolean mayInterruptIfRunning); @Override CruiseControlResponse get(); String operation(); synchronized boolean setExecutionThread(Thread t); String progressString(); Map<String, Object> getJsonStructure(); OperationProgress operationProgress(); void setFinishTimeNs(long finishTimeNs); long finishTimeNs(); }
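setExecutionThread() enforces a single invariant: once the future is cancelled, a worker thread may no longer be attached, but clearing the reference with null is still allowed, so a cancelled operation can release its thread. A minimal JDK-only sketch of the same invariant on a plain CompletableFuture (the class name is hypothetical):

import java.util.concurrent.CompletableFuture;

public class ExecutionThreadSketch extends CompletableFuture<Void> {
    private Thread executionThread;

    public synchronized boolean setExecutionThread(Thread t) {
        if (isCancelled() && t != null) {
            return false; // refuse to attach a worker to a cancelled operation
        }
        executionThread = t;
        return true;
    }

    public static void main(String[] args) {
        ExecutionThreadSketch f = new ExecutionThreadSketch();
        System.out.println(f.setExecutionThread(new Thread())); // true
        f.cancel(true);
        System.out.println(f.setExecutionThread(null));         // true: clearing is allowed
        System.out.println(f.setExecutionThread(new Thread())); // false
    }
}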
@Test public void testPopulateRackInfoForReplicationFactorChange() { Map<String, List<Integer>> brokersByRack = new HashMap<>(); Map<Integer, String> rackByBroker = new HashMap<>(); assertThrows(RuntimeException.class, () -> populateRackInfoForReplicationFactorChange(Collections.singletonMap((short) (NODES.length + 1), Collections.singleton(TOPIC)), CLUSTER, false, brokersByRack, rackByBroker)); assertThrows(RuntimeException.class, () -> populateRackInfoForReplicationFactorChange(Collections.singletonMap((short) NODES.length, Collections.singleton(TOPIC)), CLUSTER, false, brokersByRack, rackByBroker)); populateRackInfoForReplicationFactorChange(Collections.singletonMap((short) NODES.length, Collections.singleton(TOPIC)), CLUSTER, true, brokersByRack, rackByBroker); assertEquals(2, brokersByRack.size()); assertEquals(NODES.length, rackByBroker.size()); }
public static void populateRackInfoForReplicationFactorChange(Map<Short, Set<String>> topicsByReplicationFactor, Cluster cluster, boolean skipTopicRackAwarenessCheck, Map<String, List<Integer>> brokersByRack, Map<Integer, String> rackByBroker) { for (Node node : cluster.nodes()) { String rack = getRackHandleNull(node); brokersByRack.putIfAbsent(rack, new ArrayList<>()); brokersByRack.get(rack).add(node.id()); rackByBroker.put(node.id(), rack); } topicsByReplicationFactor.forEach((replicationFactor, topics) -> { if (replicationFactor > rackByBroker.size()) { throw new RuntimeException(String.format("Unable to change replication factor (RF) of topics %s to %d since there are only %d " + "alive brokers in the cluster. Requested RF cannot be more than number of alive brokers.", topics, replicationFactor, rackByBroker.size())); } else if (replicationFactor > brokersByRack.size()) { if (skipTopicRackAwarenessCheck) { LOG.info("Target replication factor for topics {} is {}, which is larger than number of racks in cluster. Rack-awareness " + "property will be violated to add new replicas.", topics, replicationFactor); } else { throw new RuntimeException(String.format("Unable to change replication factor of topics %s to %d since there are only %d " + "racks in the cluster, to skip the rack-awareness check, set %s to true in the request.", topics, replicationFactor, brokersByRack.size(), ParameterUtils.SKIP_RACK_AWARENESS_CHECK_PARAM)); } } }); }
RunnableUtils { public static void populateRackInfoForReplicationFactorChange(Map<Short, Set<String>> topicsByReplicationFactor, Cluster cluster, boolean skipTopicRackAwarenessCheck, Map<String, List<Integer>> brokersByRack, Map<Integer, String> rackByBroker) { for (Node node : cluster.nodes()) { String rack = getRackHandleNull(node); brokersByRack.putIfAbsent(rack, new ArrayList<>()); brokersByRack.get(rack).add(node.id()); rackByBroker.put(node.id(), rack); } topicsByReplicationFactor.forEach((replicationFactor, topics) -> { if (replicationFactor > rackByBroker.size()) { throw new RuntimeException(String.format("Unable to change replication factor (RF) of topics %s to %d since there are only %d " + "alive brokers in the cluster. Requested RF cannot be more than number of alive brokers.", topics, replicationFactor, rackByBroker.size())); } else if (replicationFactor > brokersByRack.size()) { if (skipTopicRackAwarenessCheck) { LOG.info("Target replication factor for topics {} is {}, which is larger than number of racks in cluster. Rack-awareness " + "property will be violated to add new replicas.", topics, replicationFactor); } else { throw new RuntimeException(String.format("Unable to change replication factor of topics %s to %d since there are only %d " + "racks in the cluster, to skip the rack-awareness check, set %s to true in the request.", topics, replicationFactor, brokersByRack.size(), ParameterUtils.SKIP_RACK_AWARENESS_CHECK_PARAM)); } } }); } }
RunnableUtils { public static void populateRackInfoForReplicationFactorChange(Map<Short, Set<String>> topicsByReplicationFactor, Cluster cluster, boolean skipTopicRackAwarenessCheck, Map<String, List<Integer>> brokersByRack, Map<Integer, String> rackByBroker) { for (Node node : cluster.nodes()) { String rack = getRackHandleNull(node); brokersByRack.putIfAbsent(rack, new ArrayList<>()); brokersByRack.get(rack).add(node.id()); rackByBroker.put(node.id(), rack); } topicsByReplicationFactor.forEach((replicationFactor, topics) -> { if (replicationFactor > rackByBroker.size()) { throw new RuntimeException(String.format("Unable to change replication factor (RF) of topics %s to %d since there are only %d " + "alive brokers in the cluster. Requested RF cannot be more than number of alive brokers.", topics, replicationFactor, rackByBroker.size())); } else if (replicationFactor > brokersByRack.size()) { if (skipTopicRackAwarenessCheck) { LOG.info("Target replication factor for topics {} is {}, which is larger than number of racks in cluster. Rack-awareness " + "property will be violated to add new replicas.", topics, replicationFactor); } else { throw new RuntimeException(String.format("Unable to change replication factor of topics %s to %d since there are only %d " + "racks in the cluster, to skip the rack-awareness check, set %s to true in the request.", topics, replicationFactor, brokersByRack.size(), ParameterUtils.SKIP_RACK_AWARENESS_CHECK_PARAM)); } } }); } private RunnableUtils(); }
RunnableUtils { public static void populateRackInfoForReplicationFactorChange(Map<Short, Set<String>> topicsByReplicationFactor, Cluster cluster, boolean skipTopicRackAwarenessCheck, Map<String, List<Integer>> brokersByRack, Map<Integer, String> rackByBroker) { for (Node node : cluster.nodes()) { String rack = getRackHandleNull(node); brokersByRack.putIfAbsent(rack, new ArrayList<>()); brokersByRack.get(rack).add(node.id()); rackByBroker.put(node.id(), rack); } topicsByReplicationFactor.forEach((replicationFactor, topics) -> { if (replicationFactor > rackByBroker.size()) { throw new RuntimeException(String.format("Unable to change replication factor (RF) of topics %s to %d since there are only %d " + "alive brokers in the cluster. Requested RF cannot be more than number of alive brokers.", topics, replicationFactor, rackByBroker.size())); } else if (replicationFactor > brokersByRack.size()) { if (skipTopicRackAwarenessCheck) { LOG.info("Target replication factor for topics {} is {}, which is larger than number of racks in cluster. Rack-awareness " + "property will be violated to add new replicas.", topics, replicationFactor); } else { throw new RuntimeException(String.format("Unable to change replication factor of topics %s to %d since there are only %d " + "racks in the cluster, to skip the rack-awareness check, set %s to true in the request.", topics, replicationFactor, brokersByRack.size(), ParameterUtils.SKIP_RACK_AWARENESS_CHECK_PARAM)); } } }); } private RunnableUtils(); static void populateRackInfoForReplicationFactorChange(Map<Short, Set<String>> topicsByReplicationFactor, Cluster cluster, boolean skipTopicRackAwarenessCheck, Map<String, List<Integer>> brokersByRack, Map<Integer, String> rackByBroker); static Map<Short, Set<String>> topicsForReplicationFactorChange(Map<Short, Pattern> topicPatternByReplicationFactor, Cluster cluster); static boolean shouldRefreshClusterAndGeneration(Set<CruiseControlState.SubState> substates); static PartitionInfo partitionWithOfflineReplicas(Cluster cluster); static boolean isKafkaAssignerMode(Collection<String> goals); static void sanityCheckBrokersHavingOfflineReplicasOnBadDisks(List<String> goals, ClusterModel clusterModel); static void maybeStopOngoingExecutionToModifyAndWait(KafkaCruiseControl kafkaCruiseControl, OperationProgress operationProgress); static OptimizationOptions computeOptimizationOptions(ClusterModel clusterModel, boolean isTriggeredByGoalViolation, KafkaCruiseControl kafkaCruiseControl, Set<Integer> brokersToDrop, boolean dryRun, boolean excludeRecentlyDemotedBrokers, boolean excludeRecentlyRemovedBrokers, Pattern excludedTopicsPattern, Set<Integer> requestedDestinationBrokerIds, boolean onlyMoveImmigrantReplicas); static void sanityCheckOfflineReplicaPresence(ClusterModel clusterModel); }
RunnableUtils { public static void populateRackInfoForReplicationFactorChange(Map<Short, Set<String>> topicsByReplicationFactor, Cluster cluster, boolean skipTopicRackAwarenessCheck, Map<String, List<Integer>> brokersByRack, Map<Integer, String> rackByBroker) { for (Node node : cluster.nodes()) { String rack = getRackHandleNull(node); brokersByRack.putIfAbsent(rack, new ArrayList<>()); brokersByRack.get(rack).add(node.id()); rackByBroker.put(node.id(), rack); } topicsByReplicationFactor.forEach((replicationFactor, topics) -> { if (replicationFactor > rackByBroker.size()) { throw new RuntimeException(String.format("Unable to change replication factor (RF) of topics %s to %d since there are only %d " + "alive brokers in the cluster. Requested RF cannot be more than number of alive brokers.", topics, replicationFactor, rackByBroker.size())); } else if (replicationFactor > brokersByRack.size()) { if (skipTopicRackAwarenessCheck) { LOG.info("Target replication factor for topics {} is {}, which is larger than number of racks in cluster. Rack-awareness " + "property will be violated to add new replicas.", topics, replicationFactor); } else { throw new RuntimeException(String.format("Unable to change replication factor of topics %s to %d since there are only %d " + "racks in the cluster, to skip the rack-awareness check, set %s to true in the request.", topics, replicationFactor, brokersByRack.size(), ParameterUtils.SKIP_RACK_AWARENESS_CHECK_PARAM)); } } }); } private RunnableUtils(); static void populateRackInfoForReplicationFactorChange(Map<Short, Set<String>> topicsByReplicationFactor, Cluster cluster, boolean skipTopicRackAwarenessCheck, Map<String, List<Integer>> brokersByRack, Map<Integer, String> rackByBroker); static Map<Short, Set<String>> topicsForReplicationFactorChange(Map<Short, Pattern> topicPatternByReplicationFactor, Cluster cluster); static boolean shouldRefreshClusterAndGeneration(Set<CruiseControlState.SubState> substates); static PartitionInfo partitionWithOfflineReplicas(Cluster cluster); static boolean isKafkaAssignerMode(Collection<String> goals); static void sanityCheckBrokersHavingOfflineReplicasOnBadDisks(List<String> goals, ClusterModel clusterModel); static void maybeStopOngoingExecutionToModifyAndWait(KafkaCruiseControl kafkaCruiseControl, OperationProgress operationProgress); static OptimizationOptions computeOptimizationOptions(ClusterModel clusterModel, boolean isTriggeredByGoalViolation, KafkaCruiseControl kafkaCruiseControl, Set<Integer> brokersToDrop, boolean dryRun, boolean excludeRecentlyDemotedBrokers, boolean excludeRecentlyRemovedBrokers, Pattern excludedTopicsPattern, Set<Integer> requestedDestinationBrokerIds, boolean onlyMoveImmigrantReplicas); static void sanityCheckOfflineReplicaPresence(ClusterModel clusterModel); static final boolean SELF_HEALING_DRYRUN; static final Set<Integer> SELF_HEALING_DESTINATION_BROKER_IDS; static final ReplicaMovementStrategy SELF_HEALING_REPLICA_MOVEMENT_STRATEGY; static final Pattern SELF_HEALING_EXCLUDED_TOPICS; static final Integer SELF_HEALING_CONCURRENT_MOVEMENTS; static final Long SELF_HEALING_EXECUTION_PROGRESS_CHECK_INTERVAL_MS; static final boolean SELF_HEALING_SKIP_HARD_GOAL_CHECK; static final boolean SELF_HEALING_STOP_ONGOING_EXECUTION; static final ModelCompletenessRequirements SELF_HEALING_MODEL_COMPLETENESS_REQUIREMENTS; static final boolean SELF_HEALING_SKIP_URP_DEMOTION; static final boolean SELF_HEALING_EXCLUDE_FOLLOWER_DEMOTION; static final boolean SELF_HEALING_SKIP_RACK_AWARENESS_CHECK; static final boolean SELF_HEALING_IS_TRIGGERED_BY_USER_REQUEST; }
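Beyond collecting the rack topology, populateRackInfoForReplicationFactorChange() applies two guards per requested replication factor: RF may never exceed the alive-broker count, and it may exceed the rack count only when the rack-awareness check is explicitly skipped (the branch testPopulateRackInfoForReplicationFactorChange exercises with skipTopicRackAwarenessCheck = true). A JDK-only sketch of just those guards, reduced to plain counts (names are hypothetical):

public class RfGuardSketch {
    static void check(int replicationFactor, int aliveBrokers, int racks, boolean skipRackAwarenessCheck) {
        if (replicationFactor > aliveBrokers) {
            throw new RuntimeException("Requested RF cannot be more than the number of alive brokers.");
        }
        if (replicationFactor > racks && !skipRackAwarenessCheck) {
            throw new RuntimeException("Requested RF exceeds the rack count; rack-awareness would be violated.");
        }
    }

    public static void main(String[] args) {
        check(3, 3, 2, true); // allowed: rack-awareness check skipped, as in the test above
        try {
            check(4, 3, 2, true); // rejected: more replicas than alive brokers
        } catch (RuntimeException e) {
            System.out.println(e.getMessage());
        }
    }
}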
@Test public void testMultipleOperationRequest() { TestContext context = prepareRequests(false, 1); SessionManager sessionManager = new SessionManager(1, 1000, context.time(), new MetricRegistry(), null); HttpServletRequest request = context.request(0); OperationFuture future1 = new OperationFuture("future1"); OperationFuture future2 = new OperationFuture("future2"); for (int i = 0; i < 2; i++) { OperationFuture firstFuture = sessionManager.getAndCreateSessionIfNotExist(request, () -> future1, 0); assertSame(firstFuture, future1); future1.complete(new PauseSamplingResult(null)); OperationFuture secondFuture = sessionManager.getAndCreateSessionIfNotExist(request, () -> future2, 1); assertSame(secondFuture, future2); future2.complete(new ResumeSamplingResult(null)); } }
synchronized OperationFuture getAndCreateSessionIfNotExist(HttpServletRequest request, Supplier<OperationFuture> operation, int step) { HttpSession session = request.getSession(); SessionInfo info = _inProgressSessions.get(session); String requestString = toRequestString(request); if (info != null) { LOG.info("Found existing session {}", session); info.ensureSameRequest(requestString, request.getParameterMap()); if (step < info.numFutures()) { return info.future(step); } else if (step == info.numFutures()) { LOG.info("Adding new future to existing session {}.", session); OperationFuture future = operation.get(); info.addFuture(future); return future; } else { throw new IllegalArgumentException(String.format("There are %d steps in the session. Cannot add step %d.", info.numFutures(), step)); } } else { if (step > 0) { throw new IllegalArgumentException(String.format("There are no step in the session. Cannot add step %d.", step)); } if (_inProgressSessions.size() >= _capacity) { _sessionCreationFailureMeter.mark(); throw new RuntimeException("There are already " + _inProgressSessions.size() + " active sessions, which " + "has reached the servlet capacity."); } LOG.info("Created session for {}", session); info = new SessionInfo(requestString, request.getParameterMap(), ParameterUtils.endPoint(request)); OperationFuture future = operation.get(); info.addFuture(future); _inProgressSessions.put(session, info); return future; } }
SessionManager { synchronized OperationFuture getAndCreateSessionIfNotExist(HttpServletRequest request, Supplier<OperationFuture> operation, int step) { HttpSession session = request.getSession(); SessionInfo info = _inProgressSessions.get(session); String requestString = toRequestString(request); if (info != null) { LOG.info("Found existing session {}", session); info.ensureSameRequest(requestString, request.getParameterMap()); if (step < info.numFutures()) { return info.future(step); } else if (step == info.numFutures()) { LOG.info("Adding new future to existing session {}.", session); OperationFuture future = operation.get(); info.addFuture(future); return future; } else { throw new IllegalArgumentException(String.format("There are %d steps in the session. Cannot add step %d.", info.numFutures(), step)); } } else { if (step > 0) { throw new IllegalArgumentException(String.format("There are no step in the session. Cannot add step %d.", step)); } if (_inProgressSessions.size() >= _capacity) { _sessionCreationFailureMeter.mark(); throw new RuntimeException("There are already " + _inProgressSessions.size() + " active sessions, which " + "has reached the servlet capacity."); } LOG.info("Created session for {}", session); info = new SessionInfo(requestString, request.getParameterMap(), ParameterUtils.endPoint(request)); OperationFuture future = operation.get(); info.addFuture(future); _inProgressSessions.put(session, info); return future; } } }
SessionManager { synchronized OperationFuture getAndCreateSessionIfNotExist(HttpServletRequest request, Supplier<OperationFuture> operation, int step) { HttpSession session = request.getSession(); SessionInfo info = _inProgressSessions.get(session); String requestString = toRequestString(request); if (info != null) { LOG.info("Found existing session {}", session); info.ensureSameRequest(requestString, request.getParameterMap()); if (step < info.numFutures()) { return info.future(step); } else if (step == info.numFutures()) { LOG.info("Adding new future to existing session {}.", session); OperationFuture future = operation.get(); info.addFuture(future); return future; } else { throw new IllegalArgumentException(String.format("There are %d steps in the session. Cannot add step %d.", info.numFutures(), step)); } } else { if (step > 0) { throw new IllegalArgumentException(String.format("There are no step in the session. Cannot add step %d.", step)); } if (_inProgressSessions.size() >= _capacity) { _sessionCreationFailureMeter.mark(); throw new RuntimeException("There are already " + _inProgressSessions.size() + " active sessions, which " + "has reached the servlet capacity."); } LOG.info("Created session for {}", session); info = new SessionInfo(requestString, request.getParameterMap(), ParameterUtils.endPoint(request)); OperationFuture future = operation.get(); info.addFuture(future); _inProgressSessions.put(session, info); return future; } } SessionManager(int capacity, long sessionExpiryMs, Time time, MetricRegistry dropwizardMetricRegistry, Map<EndPoint, Timer> successfulRequestExecutionTimer); }
SessionManager { synchronized OperationFuture getAndCreateSessionIfNotExist(HttpServletRequest request, Supplier<OperationFuture> operation, int step) { HttpSession session = request.getSession(); SessionInfo info = _inProgressSessions.get(session); String requestString = toRequestString(request); if (info != null) { LOG.info("Found existing session {}", session); info.ensureSameRequest(requestString, request.getParameterMap()); if (step < info.numFutures()) { return info.future(step); } else if (step == info.numFutures()) { LOG.info("Adding new future to existing session {}.", session); OperationFuture future = operation.get(); info.addFuture(future); return future; } else { throw new IllegalArgumentException(String.format("There are %d steps in the session. Cannot add step %d.", info.numFutures(), step)); } } else { if (step > 0) { throw new IllegalArgumentException(String.format("There are no step in the session. Cannot add step %d.", step)); } if (_inProgressSessions.size() >= _capacity) { _sessionCreationFailureMeter.mark(); throw new RuntimeException("There are already " + _inProgressSessions.size() + " active sessions, which " + "has reached the servlet capacity."); } LOG.info("Created session for {}", session); info = new SessionInfo(requestString, request.getParameterMap(), ParameterUtils.endPoint(request)); OperationFuture future = operation.get(); info.addFuture(future); _inProgressSessions.put(session, info); return future; } } SessionManager(int capacity, long sessionExpiryMs, Time time, MetricRegistry dropwizardMetricRegistry, Map<EndPoint, Timer> successfulRequestExecutionTimer); void close(); }
SessionManager { synchronized OperationFuture getAndCreateSessionIfNotExist(HttpServletRequest request, Supplier<OperationFuture> operation, int step) { HttpSession session = request.getSession(); SessionInfo info = _inProgressSessions.get(session); String requestString = toRequestString(request); if (info != null) { LOG.info("Found existing session {}", session); info.ensureSameRequest(requestString, request.getParameterMap()); if (step < info.numFutures()) { return info.future(step); } else if (step == info.numFutures()) { LOG.info("Adding new future to existing session {}.", session); OperationFuture future = operation.get(); info.addFuture(future); return future; } else { throw new IllegalArgumentException(String.format("There are %d steps in the session. Cannot add step %d.", info.numFutures(), step)); } } else { if (step > 0) { throw new IllegalArgumentException(String.format("There are no step in the session. Cannot add step %d.", step)); } if (_inProgressSessions.size() >= _capacity) { _sessionCreationFailureMeter.mark(); throw new RuntimeException("There are already " + _inProgressSessions.size() + " active sessions, which " + "has reached the servlet capacity."); } LOG.info("Created session for {}", session); info = new SessionInfo(requestString, request.getParameterMap(), ParameterUtils.endPoint(request)); OperationFuture future = operation.get(); info.addFuture(future); _inProgressSessions.put(session, info); return future; } } SessionManager(int capacity, long sessionExpiryMs, Time time, MetricRegistry dropwizardMetricRegistry, Map<EndPoint, Timer> successfulRequestExecutionTimer); void close(); }
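For an existing session, getAndCreateSessionIfNotExist() treats the step argument as an index into the session's future list: a step that already ran is replayed from the list, the next step appends a new future, and anything further ahead is rejected. That is why testMultipleOperationRequest can loop twice and receive the same two futures. A JDK-only sketch of that bookkeeping, with the capacity and request-equality checks omitted (names are hypothetical); the rejection branch is the one testSkipStep below drives:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;

public class StepBookkeepingSketch<T> {
    private final List<T> futures = new ArrayList<>();

    T futureForStep(int step, Supplier<T> operation) {
        if (step < futures.size()) {
            return futures.get(step); // replay a step that already ran
        }
        if (step == futures.size()) {
            T future = operation.get(); // extend the session by one step
            futures.add(future);
            return future;
        }
        throw new IllegalArgumentException("Cannot add step " + step + " to a session with " + futures.size() + " steps.");
    }

    public static void main(String[] args) {
        StepBookkeepingSketch<String> s = new StepBookkeepingSketch<>();
        System.out.println(s.futureForStep(0, () -> "future1")); // future1
        System.out.println(s.futureForStep(1, () -> "future2")); // future2
        System.out.println(s.futureForStep(0, () -> "ignored")); // future1 again
    }
}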
@Test (expected = IllegalArgumentException.class) public void testSkipStep() { TestContext context = prepareRequests(false, 1); SessionManager sessionManager = new SessionManager(1, 1000, context.time(), new MetricRegistry(), null); HttpServletRequest request = context.request(0); sessionManager.getAndCreateSessionIfNotExist(request, () -> null, 1); }
synchronized OperationFuture getAndCreateSessionIfNotExist(HttpServletRequest request, Supplier<OperationFuture> operation, int step) { HttpSession session = request.getSession(); SessionInfo info = _inProgressSessions.get(session); String requestString = toRequestString(request); if (info != null) { LOG.info("Found existing session {}", session); info.ensureSameRequest(requestString, request.getParameterMap()); if (step < info.numFutures()) { return info.future(step); } else if (step == info.numFutures()) { LOG.info("Adding new future to existing session {}.", session); OperationFuture future = operation.get(); info.addFuture(future); return future; } else { throw new IllegalArgumentException(String.format("There are %d steps in the session. Cannot add step %d.", info.numFutures(), step)); } } else { if (step > 0) { throw new IllegalArgumentException(String.format("There are no step in the session. Cannot add step %d.", step)); } if (_inProgressSessions.size() >= _capacity) { _sessionCreationFailureMeter.mark(); throw new RuntimeException("There are already " + _inProgressSessions.size() + " active sessions, which " + "has reached the servlet capacity."); } LOG.info("Created session for {}", session); info = new SessionInfo(requestString, request.getParameterMap(), ParameterUtils.endPoint(request)); OperationFuture future = operation.get(); info.addFuture(future); _inProgressSessions.put(session, info); return future; } }
SessionManager { synchronized OperationFuture getAndCreateSessionIfNotExist(HttpServletRequest request, Supplier<OperationFuture> operation, int step) { HttpSession session = request.getSession(); SessionInfo info = _inProgressSessions.get(session); String requestString = toRequestString(request); if (info != null) { LOG.info("Found existing session {}", session); info.ensureSameRequest(requestString, request.getParameterMap()); if (step < info.numFutures()) { return info.future(step); } else if (step == info.numFutures()) { LOG.info("Adding new future to existing session {}.", session); OperationFuture future = operation.get(); info.addFuture(future); return future; } else { throw new IllegalArgumentException(String.format("There are %d steps in the session. Cannot add step %d.", info.numFutures(), step)); } } else { if (step > 0) { throw new IllegalArgumentException(String.format("There are no step in the session. Cannot add step %d.", step)); } if (_inProgressSessions.size() >= _capacity) { _sessionCreationFailureMeter.mark(); throw new RuntimeException("There are already " + _inProgressSessions.size() + " active sessions, which " + "has reached the servlet capacity."); } LOG.info("Created session for {}", session); info = new SessionInfo(requestString, request.getParameterMap(), ParameterUtils.endPoint(request)); OperationFuture future = operation.get(); info.addFuture(future); _inProgressSessions.put(session, info); return future; } } }
SessionManager { synchronized OperationFuture getAndCreateSessionIfNotExist(HttpServletRequest request, Supplier<OperationFuture> operation, int step) { HttpSession session = request.getSession(); SessionInfo info = _inProgressSessions.get(session); String requestString = toRequestString(request); if (info != null) { LOG.info("Found existing session {}", session); info.ensureSameRequest(requestString, request.getParameterMap()); if (step < info.numFutures()) { return info.future(step); } else if (step == info.numFutures()) { LOG.info("Adding new future to existing session {}.", session); OperationFuture future = operation.get(); info.addFuture(future); return future; } else { throw new IllegalArgumentException(String.format("There are %d steps in the session. Cannot add step %d.", info.numFutures(), step)); } } else { if (step > 0) { throw new IllegalArgumentException(String.format("There are no step in the session. Cannot add step %d.", step)); } if (_inProgressSessions.size() >= _capacity) { _sessionCreationFailureMeter.mark(); throw new RuntimeException("There are already " + _inProgressSessions.size() + " active sessions, which " + "has reached the servlet capacity."); } LOG.info("Created session for {}", session); info = new SessionInfo(requestString, request.getParameterMap(), ParameterUtils.endPoint(request)); OperationFuture future = operation.get(); info.addFuture(future); _inProgressSessions.put(session, info); return future; } } SessionManager(int capacity, long sessionExpiryMs, Time time, MetricRegistry dropwizardMetricRegistry, Map<EndPoint, Timer> successfulRequestExecutionTimer); }
SessionManager { synchronized OperationFuture getAndCreateSessionIfNotExist(HttpServletRequest request, Supplier<OperationFuture> operation, int step) { HttpSession session = request.getSession(); SessionInfo info = _inProgressSessions.get(session); String requestString = toRequestString(request); if (info != null) { LOG.info("Found existing session {}", session); info.ensureSameRequest(requestString, request.getParameterMap()); if (step < info.numFutures()) { return info.future(step); } else if (step == info.numFutures()) { LOG.info("Adding new future to existing session {}.", session); OperationFuture future = operation.get(); info.addFuture(future); return future; } else { throw new IllegalArgumentException(String.format("There are %d steps in the session. Cannot add step %d.", info.numFutures(), step)); } } else { if (step > 0) { throw new IllegalArgumentException(String.format("There are no step in the session. Cannot add step %d.", step)); } if (_inProgressSessions.size() >= _capacity) { _sessionCreationFailureMeter.mark(); throw new RuntimeException("There are already " + _inProgressSessions.size() + " active sessions, which " + "has reached the servlet capacity."); } LOG.info("Created session for {}", session); info = new SessionInfo(requestString, request.getParameterMap(), ParameterUtils.endPoint(request)); OperationFuture future = operation.get(); info.addFuture(future); _inProgressSessions.put(session, info); return future; } } SessionManager(int capacity, long sessionExpiryMs, Time time, MetricRegistry dropwizardMetricRegistry, Map<EndPoint, Timer> successfulRequestExecutionTimer); void close(); }
SessionManager { synchronized OperationFuture getAndCreateSessionIfNotExist(HttpServletRequest request, Supplier<OperationFuture> operation, int step) { HttpSession session = request.getSession(); SessionInfo info = _inProgressSessions.get(session); String requestString = toRequestString(request); if (info != null) { LOG.info("Found existing session {}", session); info.ensureSameRequest(requestString, request.getParameterMap()); if (step < info.numFutures()) { return info.future(step); } else if (step == info.numFutures()) { LOG.info("Adding new future to existing session {}.", session); OperationFuture future = operation.get(); info.addFuture(future); return future; } else { throw new IllegalArgumentException(String.format("There are %d steps in the session. Cannot add step %d.", info.numFutures(), step)); } } else { if (step > 0) { throw new IllegalArgumentException(String.format("There are no step in the session. Cannot add step %d.", step)); } if (_inProgressSessions.size() >= _capacity) { _sessionCreationFailureMeter.mark(); throw new RuntimeException("There are already " + _inProgressSessions.size() + " active sessions, which " + "has reached the servlet capacity."); } LOG.info("Created session for {}", session); info = new SessionInfo(requestString, request.getParameterMap(), ParameterUtils.endPoint(request)); OperationFuture future = operation.get(); info.addFuture(future); _inProgressSessions.put(session, info); return future; } } SessionManager(int capacity, long sessionExpiryMs, Time time, MetricRegistry dropwizardMetricRegistry, Map<EndPoint, Timer> successfulRequestExecutionTimer); void close(); }
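testSkipStep drives the complementary branch of the same guard: on a brand-new session with no stored futures, any step greater than 0 throws IllegalArgumentException before a session is ever created, which the bookkeeping sketch after the previous row models as requesting a step beyond the size of an empty list.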
@Test public void testToString() { assertNotNull(new BrokerFailures().toString()); }
@Override public String toString() { StringBuilder sb = new StringBuilder().append("{"); sb.append(_fixable ? "Fixable " : " Unfixable "); sb.append("broker failures detected: {"); if (_failedBrokers != null) { _failedBrokers.forEach((key, value) -> { sb.append("Broker ").append(key).append(" failed at ").append(toDateString(value)).append(",\t"); }); sb.setLength(sb.length() - 2); } sb.append("}}"); return sb.toString(); }
BrokerFailures extends KafkaAnomaly { @Override public String toString() { StringBuilder sb = new StringBuilder().append("{"); sb.append(_fixable ? "Fixable " : " Unfixable "); sb.append("broker failures detected: {"); if (_failedBrokers != null) { _failedBrokers.forEach((key, value) -> { sb.append("Broker ").append(key).append(" failed at ").append(toDateString(value)).append(",\t"); }); sb.setLength(sb.length() - 2); } sb.append("}}"); return sb.toString(); } }
BrokerFailures extends KafkaAnomaly { @Override public String toString() { StringBuilder sb = new StringBuilder().append("{"); sb.append(_fixable ? "Fixable " : " Unfixable "); sb.append("broker failures detected: {"); if (_failedBrokers != null) { _failedBrokers.forEach((key, value) -> { sb.append("Broker ").append(key).append(" failed at ").append(toDateString(value)).append(",\t"); }); sb.setLength(sb.length() - 2); } sb.append("}}"); return sb.toString(); } BrokerFailures(); }
BrokerFailures extends KafkaAnomaly { @Override public String toString() { StringBuilder sb = new StringBuilder().append("{"); sb.append(_fixable ? "Fixable " : " Unfixable "); sb.append("broker failures detected: {"); if (_failedBrokers != null) { _failedBrokers.forEach((key, value) -> { sb.append("Broker ").append(key).append(" failed at ").append(toDateString(value)).append(",\t"); }); sb.setLength(sb.length() - 2); } sb.append("}}"); return sb.toString(); } BrokerFailures(); Map<Integer, Long> failedBrokers(); boolean fixable(); @Override boolean fix(); @Override AnomalyType anomalyType(); @Override Supplier<String> reasonSupplier(); @Override String toString(); @SuppressWarnings("unchecked") @Override void configure(Map<String, ?> configs); }
BrokerFailures extends KafkaAnomaly { @Override public String toString() { StringBuilder sb = new StringBuilder().append("{"); sb.append(_fixable ? "Fixable " : " Unfixable "); sb.append("broker failures detected: {"); if (_failedBrokers != null) { _failedBrokers.forEach((key, value) -> { sb.append("Broker ").append(key).append(" failed at ").append(toDateString(value)).append(",\t"); }); sb.setLength(sb.length() - 2); } sb.append("}}"); return sb.toString(); } BrokerFailures(); Map<Integer, Long> failedBrokers(); boolean fixable(); @Override boolean fix(); @Override AnomalyType anomalyType(); @Override Supplier<String> reasonSupplier(); @Override String toString(); @SuppressWarnings("unchecked") @Override void configure(Map<String, ?> configs); }
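testToString only asserts that the rendering is non-null, so it does not reach a subtle edge case in toString(): once _failedBrokers is non-null, the trailing ",\t" is trimmed unconditionally, which would also chop two characters off the prefix when the map is non-null but empty. A JDK-only sketch of the same layout that trims only when something was appended; it prints the raw timestamp where the original formats it with toDateString(), and the class name is hypothetical:

import java.util.Map;

public class BrokerFailuresToStringSketch {
    static String render(boolean fixable, Map<Integer, Long> failedBrokers) {
        StringBuilder sb = new StringBuilder("{").append(fixable ? "Fixable " : " Unfixable ")
                                                 .append("broker failures detected: {");
        if (failedBrokers != null && !failedBrokers.isEmpty()) {
            failedBrokers.forEach((id, time) -> sb.append("Broker ").append(id)
                                                  .append(" failed at ").append(time).append(",\t"));
            sb.setLength(sb.length() - 2); // drop the trailing ",\t" only if entries were appended
        }
        return sb.append("}}").toString();
    }

    public static void main(String[] args) {
        System.out.println(render(true, Map.of(0, 100L)));
        System.out.println(render(true, Map.of())); // safe for the empty-map case
    }
}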
@Test public void testSlackAlertWithNoWebhook() { _notifier = new MockSlackSelfHealingNotifier(MOCK_TIME); _notifier.alert(FAILURES, false, 1L, KafkaAnomalyType.BROKER_FAILURE); assertEquals(0, _notifier.getSlackMessageList().size()); }
@Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } SlackSelfHealingNotifier(); SlackSelfHealingNotifier(Time time); }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } SlackSelfHealingNotifier(); SlackSelfHealingNotifier(Time time); @Override void configure(Map<String, ?> config); @Override void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType); }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } SlackSelfHealingNotifier(); SlackSelfHealingNotifier(Time time); @Override void configure(Map<String, ?> config); @Override void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType); static final String SLACK_SELF_HEALING_NOTIFIER_WEBHOOK; static final String SLACK_SELF_HEALING_NOTIFIER_ICON; static final String SLACK_SELF_HEALING_NOTIFIER_USER; static final String SLACK_SELF_HEALING_NOTIFIER_CHANNEL; static final String DEFAULT_SLACK_SELF_HEALING_NOTIFIER_ICON; static final String DEFAULT_SLACK_SELF_HEALING_NOTIFIER_USER; }
@Test public void testSlackAlertWithNoChannel() { _notifier = new MockSlackSelfHealingNotifier(MOCK_TIME); _notifier._slackWebhook = "http:"; _notifier.alert(FAILURES, false, 1L, KafkaAnomalyType.BROKER_FAILURE); assertEquals(0, _notifier.getSlackMessageList().size()); }
@Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } SlackSelfHealingNotifier(); SlackSelfHealingNotifier(Time time); }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } SlackSelfHealingNotifier(); SlackSelfHealingNotifier(Time time); @Override void configure(Map<String, ?> config); @Override void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType); }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } SlackSelfHealingNotifier(); SlackSelfHealingNotifier(Time time); @Override void configure(Map<String, ?> config); @Override void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType); static final String SLACK_SELF_HEALING_NOTIFIER_WEBHOOK; static final String SLACK_SELF_HEALING_NOTIFIER_ICON; static final String SLACK_SELF_HEALING_NOTIFIER_USER; static final String SLACK_SELF_HEALING_NOTIFIER_CHANNEL; static final String DEFAULT_SLACK_SELF_HEALING_NOTIFIER_ICON; static final String DEFAULT_SLACK_SELF_HEALING_NOTIFIER_USER; }
@Test public void testSlackAlertWithDefaultOptions() { _notifier = new MockSlackSelfHealingNotifier(MOCK_TIME); _notifier._slackWebhook = "http:"; _notifier._slackChannel = "#dummy-channel"; _notifier.alert(FAILURES, false, 1L, KafkaAnomalyType.BROKER_FAILURE); assertEquals(1, _notifier.getSlackMessageList().size()); SlackMessage message = _notifier.getSlackMessageList().get(0); assertEquals("#dummy-channel", message.getChannel()); }
@Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } SlackSelfHealingNotifier(); SlackSelfHealingNotifier(Time time); }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } SlackSelfHealingNotifier(); SlackSelfHealingNotifier(Time time); @Override void configure(Map<String, ?> config); @Override void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType); }
SlackSelfHealingNotifier extends SelfHealingNotifier { @Override public void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s detected %s. Self healing %s.%s", anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", toDateString(selfHealingStartTime)) : "is disabled", autoFixTriggered ? "%nSelf-healing has been triggered." : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } } SlackSelfHealingNotifier(); SlackSelfHealingNotifier(Time time); @Override void configure(Map<String, ?> config); @Override void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType); static final String SLACK_SELF_HEALING_NOTIFIER_WEBHOOK; static final String SLACK_SELF_HEALING_NOTIFIER_ICON; static final String SLACK_SELF_HEALING_NOTIFIER_USER; static final String SLACK_SELF_HEALING_NOTIFIER_CHANNEL; static final String DEFAULT_SLACK_SELF_HEALING_NOTIFIER_ICON; static final String DEFAULT_SLACK_SELF_HEALING_NOTIFIER_USER; }
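Given the constants and the configure(Map) override declared above, wiring up the notifier outside of tests plausibly looks like the sketch below. This is an assumption-laden sketch rather than the documented setup path: it presumes configure reads the Slack keys directly from the map, and the webhook and channel literals are placeholders.

import java.util.HashMap;
import java.util.Map;

public class SlackNotifierConfigSketch {
    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        // Keys are the constants declared on SlackSelfHealingNotifier; values are placeholders.
        config.put(SlackSelfHealingNotifier.SLACK_SELF_HEALING_NOTIFIER_WEBHOOK, "http://dummy-webhook-url");
        config.put(SlackSelfHealingNotifier.SLACK_SELF_HEALING_NOTIFIER_CHANNEL, "#dummy-channel");

        SlackSelfHealingNotifier notifier = new SlackSelfHealingNotifier();
        notifier.configure(config); // user and icon presumably fall back to the DEFAULT_* constants above
    }
}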
@Test public void testOnBrokerFailure() { final long failureTime1 = 200L; final long failureTime2 = 400L; final long startTime = 500L; KafkaCruiseControl mockKafkaCruiseControl = EasyMock.mock(KafkaCruiseControl.class); Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties(); KafkaCruiseControlConfig kafkaCruiseControlConfig = new KafkaCruiseControlConfig(props); EasyMock.expect(mockKafkaCruiseControl.config()).andReturn(kafkaCruiseControlConfig).atLeastOnce(); EasyMock.replay(mockKafkaCruiseControl); Time mockTime = new MockTime(0, startTime, TimeUnit.NANOSECONDS.convert(startTime, TimeUnit.MILLISECONDS)); TestingBrokerFailureAutoFixNotifier anomalyNotifier = new TestingBrokerFailureAutoFixNotifier(mockTime); anomalyNotifier.configure(Collections.singletonMap(SelfHealingNotifier.SELF_HEALING_BROKER_FAILURE_ENABLED_CONFIG, "true")); Map<Integer, Long> failedBrokers = new HashMap<>(); failedBrokers.put(1, failureTime1); failedBrokers.put(2, failureTime2); Map<String, Object> parameterConfigOverrides = new HashMap<>(4); parameterConfigOverrides.put(KAFKA_CRUISE_CONTROL_OBJECT_CONFIG, mockKafkaCruiseControl); parameterConfigOverrides.put(FAILED_BROKERS_OBJECT_CONFIG, failedBrokers); parameterConfigOverrides.put(ANOMALY_DETECTION_TIME_MS_OBJECT_CONFIG, failureTime1); parameterConfigOverrides.put(BROKER_FAILURES_FIXABLE_CONFIG, true); AnomalyNotificationResult result = anomalyNotifier.onBrokerFailure( kafkaCruiseControlConfig.getConfiguredInstance(AnomalyDetectorConfig.BROKER_FAILURES_CLASS_CONFIG, BrokerFailures.class, parameterConfigOverrides)); assertEquals(AnomalyNotificationResult.Action.CHECK, result.action()); assertEquals(SelfHealingNotifier.DEFAULT_ALERT_THRESHOLD_MS + failureTime1 - mockTime.milliseconds(), result.delay()); assertFalse(anomalyNotifier._alertCalled.get(KafkaAnomalyType.BROKER_FAILURE)); mockTime.sleep(result.delay() - 1); result = anomalyNotifier.onBrokerFailure( kafkaCruiseControlConfig.getConfiguredInstance(AnomalyDetectorConfig.BROKER_FAILURES_CLASS_CONFIG, BrokerFailures.class, parameterConfigOverrides)); assertEquals(AnomalyNotificationResult.Action.CHECK, result.action()); assertEquals(1, result.delay()); assertFalse(anomalyNotifier._alertCalled.get(KafkaAnomalyType.BROKER_FAILURE)); mockTime.sleep(1); anomalyNotifier.resetAlert(KafkaAnomalyType.BROKER_FAILURE); result = anomalyNotifier.onBrokerFailure( kafkaCruiseControlConfig.getConfiguredInstance(AnomalyDetectorConfig.BROKER_FAILURES_CLASS_CONFIG, BrokerFailures.class, parameterConfigOverrides)); assertEquals(AnomalyNotificationResult.Action.CHECK, result.action()); assertEquals(SelfHealingNotifier.DEFAULT_AUTO_FIX_THRESHOLD_MS + failureTime1 - mockTime.milliseconds(), result.delay()); assertTrue(anomalyNotifier._alertCalled.get(KafkaAnomalyType.BROKER_FAILURE)); mockTime.sleep(result.delay() - 1); anomalyNotifier.resetAlert(KafkaAnomalyType.BROKER_FAILURE); result = anomalyNotifier.onBrokerFailure( kafkaCruiseControlConfig.getConfiguredInstance(AnomalyDetectorConfig.BROKER_FAILURES_CLASS_CONFIG, BrokerFailures.class, parameterConfigOverrides)); assertEquals(AnomalyNotificationResult.Action.CHECK, result.action()); assertEquals(1, result.delay()); assertFalse(anomalyNotifier._alertCalled.get(KafkaAnomalyType.BROKER_FAILURE)); assertFalse(anomalyNotifier._autoFixTriggered.get(KafkaAnomalyType.BROKER_FAILURE)); mockTime.sleep(1); anomalyNotifier.resetAlert(KafkaAnomalyType.BROKER_FAILURE); result = anomalyNotifier.onBrokerFailure( kafkaCruiseControlConfig.getConfiguredInstance(AnomalyDetectorConfig.BROKER_FAILURES_CLASS_CONFIG, BrokerFailures.class, parameterConfigOverrides)); assertEquals(AnomalyNotificationResult.Action.FIX, result.action()); assertEquals(-1L, result.delay()); assertTrue(anomalyNotifier._alertCalled.get(KafkaAnomalyType.BROKER_FAILURE)); assertTrue(anomalyNotifier._autoFixTriggered.get(KafkaAnomalyType.BROKER_FAILURE)); assertFalse(anomalyNotifier._alertCalled.get(KafkaAnomalyType.GOAL_VIOLATION)); assertFalse(anomalyNotifier._alertCalled.get(KafkaAnomalyType.METRIC_ANOMALY)); assertFalse(anomalyNotifier._alertCalled.get(KafkaAnomalyType.DISK_FAILURE)); assertFalse(anomalyNotifier._alertCalled.get(KafkaAnomalyType.TOPIC_ANOMALY)); }
@Override public AnomalyNotificationResult onBrokerFailure(BrokerFailures brokerFailures) { long earliestFailureTimeMs = Long.MAX_VALUE; for (long t : brokerFailures.failedBrokers().values()) { earliestFailureTimeMs = Math.min(earliestFailureTimeMs, t); } long nowMs = _time.milliseconds(); long alertTimeMs = earliestFailureTimeMs + _brokerFailureAlertThresholdMs; long selfHealingTimeMs = earliestFailureTimeMs + _selfHealingThresholdMs; AnomalyNotificationResult result = null; if (nowMs < alertTimeMs) { long delayMs = alertTimeMs - nowMs; result = AnomalyNotificationResult.check(delayMs); } else if (nowMs < selfHealingTimeMs) { if (hasNewFailureToAlert(brokerFailures, false)) { alert(brokerFailures, false, selfHealingTimeMs, KafkaAnomalyType.BROKER_FAILURE); } long delay = selfHealingTimeMs - nowMs; result = AnomalyNotificationResult.check(delay); } else { boolean autoFixTriggered = _selfHealingEnabled.get(KafkaAnomalyType.BROKER_FAILURE) && brokerFailures.fixable(); if (hasNewFailureToAlert(brokerFailures, autoFixTriggered)) { alert(brokerFailures, autoFixTriggered, selfHealingTimeMs, KafkaAnomalyType.BROKER_FAILURE); } result = autoFixTriggered ? AnomalyNotificationResult.fix() : AnomalyNotificationResult.ignore(); } return result; }
SelfHealingNotifier implements AnomalyNotifier { @Override public AnomalyNotificationResult onBrokerFailure(BrokerFailures brokerFailures) { long earliestFailureTimeMs = Long.MAX_VALUE; for (long t : brokerFailures.failedBrokers().values()) { earliestFailureTimeMs = Math.min(earliestFailureTimeMs, t); } long nowMs = _time.milliseconds(); long alertTimeMs = earliestFailureTimeMs + _brokerFailureAlertThresholdMs; long selfHealingTimeMs = earliestFailureTimeMs + _selfHealingThresholdMs; AnomalyNotificationResult result = null; if (nowMs < alertTimeMs) { long delayMs = alertTimeMs - nowMs; result = AnomalyNotificationResult.check(delayMs); } else if (nowMs < selfHealingTimeMs) { if (hasNewFailureToAlert(brokerFailures, false)) { alert(brokerFailures, false, selfHealingTimeMs, KafkaAnomalyType.BROKER_FAILURE); } long delay = selfHealingTimeMs - nowMs; result = AnomalyNotificationResult.check(delay); } else { boolean autoFixTriggered = _selfHealingEnabled.get(KafkaAnomalyType.BROKER_FAILURE) && brokerFailures.fixable(); if (hasNewFailureToAlert(brokerFailures, autoFixTriggered)) { alert(brokerFailures, autoFixTriggered, selfHealingTimeMs, KafkaAnomalyType.BROKER_FAILURE); } result = autoFixTriggered ? AnomalyNotificationResult.fix() : AnomalyNotificationResult.ignore(); } return result; } }
SelfHealingNotifier implements AnomalyNotifier { @Override public AnomalyNotificationResult onBrokerFailure(BrokerFailures brokerFailures) { long earliestFailureTimeMs = Long.MAX_VALUE; for (long t : brokerFailures.failedBrokers().values()) { earliestFailureTimeMs = Math.min(earliestFailureTimeMs, t); } long nowMs = _time.milliseconds(); long alertTimeMs = earliestFailureTimeMs + _brokerFailureAlertThresholdMs; long selfHealingTimeMs = earliestFailureTimeMs + _selfHealingThresholdMs; AnomalyNotificationResult result = null; if (nowMs < alertTimeMs) { long delayMs = alertTimeMs - nowMs; result = AnomalyNotificationResult.check(delayMs); } else if (nowMs < selfHealingTimeMs) { if (hasNewFailureToAlert(brokerFailures, false)) { alert(brokerFailures, false, selfHealingTimeMs, KafkaAnomalyType.BROKER_FAILURE); } long delay = selfHealingTimeMs - nowMs; result = AnomalyNotificationResult.check(delay); } else { boolean autoFixTriggered = _selfHealingEnabled.get(KafkaAnomalyType.BROKER_FAILURE) && brokerFailures.fixable(); if (hasNewFailureToAlert(brokerFailures, autoFixTriggered)) { alert(brokerFailures, autoFixTriggered, selfHealingTimeMs, KafkaAnomalyType.BROKER_FAILURE); } result = autoFixTriggered ? AnomalyNotificationResult.fix() : AnomalyNotificationResult.ignore(); } return result; } SelfHealingNotifier(); SelfHealingNotifier(Time time); }
SelfHealingNotifier implements AnomalyNotifier { @Override public AnomalyNotificationResult onBrokerFailure(BrokerFailures brokerFailures) { long earliestFailureTimeMs = Long.MAX_VALUE; for (long t : brokerFailures.failedBrokers().values()) { earliestFailureTimeMs = Math.min(earliestFailureTimeMs, t); } long nowMs = _time.milliseconds(); long alertTimeMs = earliestFailureTimeMs + _brokerFailureAlertThresholdMs; long selfHealingTimeMs = earliestFailureTimeMs + _selfHealingThresholdMs; AnomalyNotificationResult result = null; if (nowMs < alertTimeMs) { long delayMs = alertTimeMs - nowMs; result = AnomalyNotificationResult.check(delayMs); } else if (nowMs < selfHealingTimeMs) { if (hasNewFailureToAlert(brokerFailures, false)) { alert(brokerFailures, false, selfHealingTimeMs, KafkaAnomalyType.BROKER_FAILURE); } long delay = selfHealingTimeMs - nowMs; result = AnomalyNotificationResult.check(delay); } else { boolean autoFixTriggered = _selfHealingEnabled.get(KafkaAnomalyType.BROKER_FAILURE) && brokerFailures.fixable(); if (hasNewFailureToAlert(brokerFailures, autoFixTriggered)) { alert(brokerFailures, autoFixTriggered, selfHealingTimeMs, KafkaAnomalyType.BROKER_FAILURE); } result = autoFixTriggered ? AnomalyNotificationResult.fix() : AnomalyNotificationResult.ignore(); } return result; } SelfHealingNotifier(); SelfHealingNotifier(Time time); @Override AnomalyNotificationResult onGoalViolation(GoalViolations goalViolations); @Override AnomalyNotificationResult onMetricAnomaly(KafkaMetricAnomaly metricAnomaly); @Override AnomalyNotificationResult onTopicAnomaly(TopicAnomaly topicAnomaly); @Override AnomalyNotificationResult onMaintenanceEvent(MaintenanceEvent maintenanceEvent); @Override AnomalyNotificationResult onDiskFailure(DiskFailures diskFailures); @Override Map<AnomalyType, Boolean> selfHealingEnabled(); @Override synchronized boolean setSelfHealingFor(AnomalyType anomalyType, boolean isSelfHealingEnabled); @Override synchronized Map<AnomalyType, Float> selfHealingEnabledRatio(); @Override AnomalyNotificationResult onBrokerFailure(BrokerFailures brokerFailures); void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType); @Override void configure(Map<String, ?> config); @Override long uptimeMs(long nowMs); }
SelfHealingNotifier implements AnomalyNotifier { @Override public AnomalyNotificationResult onBrokerFailure(BrokerFailures brokerFailures) { long earliestFailureTimeMs = Long.MAX_VALUE; for (long t : brokerFailures.failedBrokers().values()) { earliestFailureTimeMs = Math.min(earliestFailureTimeMs, t); } long nowMs = _time.milliseconds(); long alertTimeMs = earliestFailureTimeMs + _brokerFailureAlertThresholdMs; long selfHealingTimeMs = earliestFailureTimeMs + _selfHealingThresholdMs; AnomalyNotificationResult result = null; if (nowMs < alertTimeMs) { long delayMs = alertTimeMs - nowMs; result = AnomalyNotificationResult.check(delayMs); } else if (nowMs < selfHealingTimeMs) { if (hasNewFailureToAlert(brokerFailures, false)) { alert(brokerFailures, false, selfHealingTimeMs, KafkaAnomalyType.BROKER_FAILURE); } long delay = selfHealingTimeMs - nowMs; result = AnomalyNotificationResult.check(delay); } else { boolean autoFixTriggered = _selfHealingEnabled.get(KafkaAnomalyType.BROKER_FAILURE) && brokerFailures.fixable(); if (hasNewFailureToAlert(brokerFailures, autoFixTriggered)) { alert(brokerFailures, autoFixTriggered, selfHealingTimeMs, KafkaAnomalyType.BROKER_FAILURE); } result = autoFixTriggered ? AnomalyNotificationResult.fix() : AnomalyNotificationResult.ignore(); } return result; } SelfHealingNotifier(); SelfHealingNotifier(Time time); @Override AnomalyNotificationResult onGoalViolation(GoalViolations goalViolations); @Override AnomalyNotificationResult onMetricAnomaly(KafkaMetricAnomaly metricAnomaly); @Override AnomalyNotificationResult onTopicAnomaly(TopicAnomaly topicAnomaly); @Override AnomalyNotificationResult onMaintenanceEvent(MaintenanceEvent maintenanceEvent); @Override AnomalyNotificationResult onDiskFailure(DiskFailures diskFailures); @Override Map<AnomalyType, Boolean> selfHealingEnabled(); @Override synchronized boolean setSelfHealingFor(AnomalyType anomalyType, boolean isSelfHealingEnabled); @Override synchronized Map<AnomalyType, Float> selfHealingEnabledRatio(); @Override AnomalyNotificationResult onBrokerFailure(BrokerFailures brokerFailures); void alert(Object anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType); @Override void configure(Map<String, ?> config); @Override long uptimeMs(long nowMs); static final String BROKER_FAILURE_ALERT_THRESHOLD_MS_CONFIG; static final String SELF_HEALING_ENABLED_CONFIG; static final String SELF_HEALING_BROKER_FAILURE_ENABLED_CONFIG; static final String SELF_HEALING_GOAL_VIOLATION_ENABLED_CONFIG; static final String SELF_HEALING_METRIC_ANOMALY_ENABLED_CONFIG; static final String SELF_HEALING_DISK_FAILURE_ENABLED_CONFIG; static final String SELF_HEALING_TOPIC_ANOMALY_ENABLED_CONFIG; static final String SELF_HEALING_MAINTENANCE_EVENT_ENABLED_CONFIG; static final String BROKER_FAILURE_SELF_HEALING_THRESHOLD_MS_CONFIG; }
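The three branches of onBrokerFailure implement a simple timeline keyed off the earliest broker failure: before the alert threshold the notifier only schedules a re-check, between the alert and self-healing thresholds it alerts and keeps checking, and past the self-healing threshold it either fixes or ignores. The worked sketch below traces that decision with hypothetical threshold values (the real ones come from the notifier's configuration):

public class BrokerFailureTimelineSketch {
    public static void main(String[] args) {
        long earliestFailureTimeMs = 200L;    // failureTime1 in the test above
        long alertThresholdMs = 1_000L;       // hypothetical _brokerFailureAlertThresholdMs
        long selfHealingThresholdMs = 2_000L; // hypothetical _selfHealingThresholdMs

        long alertTimeMs = earliestFailureTimeMs + alertThresholdMs;             // 1_200
        long selfHealingTimeMs = earliestFailureTimeMs + selfHealingThresholdMs; // 2_200

        // One probe per phase of the timeline.
        for (long nowMs : new long[] {500L, 1_500L, 2_500L}) {
            if (nowMs < alertTimeMs) {
                System.out.printf("t=%d: CHECK again in %d ms (too early to alert)%n", nowMs, alertTimeMs - nowMs);
            } else if (nowMs < selfHealingTimeMs) {
                System.out.printf("t=%d: alert once, then CHECK again in %d ms%n", nowMs, selfHealingTimeMs - nowMs);
            } else {
                System.out.printf("t=%d: alert, then FIX if self healing is enabled and the failure is fixable, else IGNORE%n", nowMs);
            }
        }
    }
}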
@Test public void testAggregationOption3() { MetricSampleAggregator<String, IntegerEntity> aggregator = prepareCompletenessTestEnv(); AggregationOptions<String, IntegerEntity> options = new AggregationOptions<>(0.0, 0.5, NUM_WINDOWS, 5, new HashSet<>(Arrays.asList(ENTITY1, ENTITY2, ENTITY3)), AggregationOptions.Granularity.ENTITY, true); MetricSampleCompleteness<String, IntegerEntity> completeness = aggregator.completeness(-1, Long.MAX_VALUE, options); assertEquals(17, completeness.validWindowIndices().size()); assertFalse(completeness.validWindowIndices().contains(3L)); assertFalse(completeness.validWindowIndices().contains(4L)); assertFalse(completeness.validWindowIndices().contains(20L)); assertEquals(2, completeness.validEntities().size()); assertTrue(completeness.validEntities().contains(ENTITY1)); assertTrue(completeness.validEntities().contains(ENTITY3)); assertEquals(1, completeness.validEntityGroups().size()); assertTrue(completeness.validEntityGroups().contains(ENTITY3.group())); assertCompletenessByWindowIndex(completeness); }
public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
MetricSampleAggregator extends LongGenerationed { public MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options) { _windowRollingLock.lock(); try { long fromWindowIndex = Math.max(windowIndex(from), _oldestWindowIndex); long toWindowIndex = Math.min(windowIndex(to), _currentWindowIndex - 1); if (fromWindowIndex > _currentWindowIndex || toWindowIndex < _oldestWindowIndex) { return new MetricSampleCompleteness<>(generation(), _windowMs); } maybeUpdateAggregatorState(); return _aggregatorState.completeness(fromWindowIndex, toWindowIndex, interpretAggregationOptions(options), generation()); } finally { _windowRollingLock.unlock(); } } MetricSampleAggregator(int numWindows, long windowMs, byte minSamplesPerWindow, int completenessCacheSize, MetricDef metricDef); boolean addSample(MetricSample<G, E> sample); MetricSampleAggregationResult<G, E> aggregate(long from, long to, AggregationOptions<G, E> options); Map<E, ValuesAndExtrapolations> peekCurrentWindow(); MetricSampleCompleteness<G, E> completeness(long from, long to, AggregationOptions<G, E> options); List<Long> availableWindows(); int numAvailableWindows(); int numAvailableWindows(long from, long to); List<Long> allWindows(); Long earliestWindow(); int numSamples(); void retainEntities(Set<E> entities); void removeEntities(Set<E> entities); void retainEntityGroup(Set<G> entityGroups); void removeEntityGroup(Set<G> entityGroups); void clear(); long monitoringPeriodMs(); }
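completeness(from, to, options) first clamps the requested time range to the windows the aggregator still holds: the from index is raised to the oldest retained window, the to index is lowered to the last completed window (current - 1), and a fresh, empty MetricSampleCompleteness is returned when the two ranges do not overlap. Below is a minimal sketch of that clamping, with hypothetical field values and an assumed time-over-windowMs bucketing for windowIndex:

public class WindowClampSketch {
    static final long WINDOW_MS = 10_000L;        // hypothetical _windowMs
    static final long OLDEST_WINDOW_INDEX = 5L;   // hypothetical _oldestWindowIndex
    static final long CURRENT_WINDOW_INDEX = 25L; // hypothetical _currentWindowIndex

    // Assumption: windows are numbered by bucketing the timestamp on the window size.
    static long windowIndex(long timeMs) {
        return timeMs / WINDOW_MS + 1;
    }

    public static void main(String[] args) {
        long from = -1L;            // same arguments the completeness test above passes
        long to = Long.MAX_VALUE;
        long fromWindowIndex = Math.max(windowIndex(from), OLDEST_WINDOW_INDEX);
        long toWindowIndex = Math.min(windowIndex(to), CURRENT_WINDOW_INDEX - 1);
        if (fromWindowIndex > CURRENT_WINDOW_INDEX || toWindowIndex < OLDEST_WINDOW_INDEX) {
            System.out.println("no overlap: return an empty completeness result");
        } else {
            // Prints "evaluate completeness over windows [5, 24]" with the values above.
            System.out.printf("evaluate completeness over windows [%d, %d]%n", fromWindowIndex, toWindowIndex);
        }
    }
}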
@Test public void testSlackMessageJsonFormat() { String expectedJson = "{\"username\" : \"userA\",\"text\" : \"cc alert\",\"icon_emoji\" : \":information_source:" + "\",\"channel\" : \"#cc-alerts\"}"; assertEquals(expectedJson, new SlackMessage("userA", "cc alert", ":information_source:", "#cc-alerts").toString()); }
@Override public String toString() { return "{\"username\" : " + (_username == null ? null : "\"" + _username + "\"") + ",\"text\" : " + (_text == null ? null : "\"" + _text + "\"") + ",\"icon_emoji\" : " + (_iconEmoji == null ? null : "\"" + _iconEmoji + "\"") + ",\"channel\" : " + (_channel == null ? null : "\"" + _channel + "\"") + "}"; }
SlackMessage implements Serializable { @Override public String toString() { return "{\"username\" : " + (_username == null ? null : "\"" + _username + "\"") + ",\"text\" : " + (_text == null ? null : "\"" + _text + "\"") + ",\"icon_emoji\" : " + (_iconEmoji == null ? null : "\"" + _iconEmoji + "\"") + ",\"channel\" : " + (_channel == null ? null : "\"" + _channel + "\"") + "}"; } }
SlackMessage implements Serializable { @Override public String toString() { return "{\"username\" : " + (_username == null ? null : "\"" + _username + "\"") + ",\"text\" : " + (_text == null ? null : "\"" + _text + "\"") + ",\"icon_emoji\" : " + (_iconEmoji == null ? null : "\"" + _iconEmoji + "\"") + ",\"channel\" : " + (_channel == null ? null : "\"" + _channel + "\"") + "}"; } SlackMessage(String username, String text, String iconEmoji, String channel); }
SlackMessage implements Serializable { @Override public String toString() { return "{\"username\" : " + (_username == null ? null : "\"" + _username + "\"") + ",\"text\" : " + (_text == null ? null : "\"" + _text + "\"") + ",\"icon_emoji\" : " + (_iconEmoji == null ? null : "\"" + _iconEmoji + "\"") + ",\"channel\" : " + (_channel == null ? null : "\"" + _channel + "\"") + "}"; } SlackMessage(String username, String text, String iconEmoji, String channel); String getUsername(); String getText(); String getIconEmoji(); String getChannel(); @Override String toString(); }
SlackMessage implements Serializable { @Override public String toString() { return "{\"username\" : " + (_username == null ? null : "\"" + _username + "\"") + ",\"text\" : " + (_text == null ? null : "\"" + _text + "\"") + ",\"icon_emoji\" : " + (_iconEmoji == null ? null : "\"" + _iconEmoji + "\"") + ",\"channel\" : " + (_channel == null ? null : "\"" + _channel + "\"") + "}"; } SlackMessage(String username, String text, String iconEmoji, String channel); String getUsername(); String getText(); String getIconEmoji(); String getChannel(); @Override String toString(); }
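toString() assembles the JSON payload by hand: non-null fields are wrapped in quotes, null fields are emitted as a bare JSON null, and no escaping is applied, so field values containing quotes or backslashes would produce invalid JSON. A short usage sketch:

public class SlackMessageUsageSketch {
    public static void main(String[] args) {
        // Prints the exact JSON asserted in the test above.
        System.out.println(new SlackMessage("userA", "cc alert", ":information_source:", "#cc-alerts"));
        // Null fields serialize as unquoted null, which is still valid JSON:
        // {"username" : null,"text" : "cc alert","icon_emoji" : null,"channel" : "#cc-alerts"}
        System.out.println(new SlackMessage(null, "cc alert", null, "#cc-alerts"));
    }
}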