//! Documentation for advanced features of this crate
pub mod prec_climbing;
pub mod rule_aliasing;
pub mod rule_shortcutting;
pub mod user_data;

mod model;
mod routes;
pub use model::*;
pub use routes::init_routes;

use crate::error::AppError;
use solana_program::program_error::ProgramError;
use std::convert::TryInto;
#[derive(Clone, Debug, PartialEq)]
pub enum AppInstruction {
InitializePool {
reserve_s: u64,
reserve_a: u64,
reserve_b: u64,
},
AddLiquidity {
delta_s: u64,
delta_a: u64,
delta_b: u64,
},
RemoveLiquidity {
lpt: u64,
},
Swap {
amount: u64,
limit: u64,
},
FreezePool,
ThawPool,
Earn {
amount: u64,
},
TransferPoolOwnership,
}
impl AppInstruction {
pub fn unpack(instruction: &[u8]) -> Result<Self, ProgramError> {
let (&tag, rest) = instruction
.split_first()
.ok_or(AppError::InvalidInstruction)?;
Ok(match tag {
0 => {
let reserve_s = rest
.get(..8)
.and_then(|slice| slice.try_into().ok())
.map(u64::from_le_bytes)
.ok_or(AppError::InvalidInstruction)?;
let reserve_a = rest
.get(8..16)
.and_then(|slice| slice.try_into().ok())
.map(u64::from_le_bytes)
.ok_or(AppError::InvalidInstruction)?;
let reserve_b = rest
.get(16..24)
.and_then(|slice| slice.try_into().ok())
.map(u64::from_le_bytes)
.ok_or(AppError::InvalidInstruction)?;
Self::InitializePool {
reserve_s,
reserve_a,
reserve_b,
}
}
1 => {
let delta_s = rest
.get(..8)
.and_then(|slice| slice.try_into().ok())
.map(u64::from_le_bytes)
.ok_or(AppError::InvalidInstruction)?;
let delta_a = rest
.get(8..16)
.and_then(|slice| slice.try_into().ok())
.map(u64::from_le_bytes)
.ok_or(AppError::InvalidInstruction)?;
let delta_b = rest
.get(16..24)
.and_then(|slice| slice.try_into().ok())
.map(u64::from_le_bytes)
.ok_or(AppError::InvalidInstruction)?;
Self::AddLiquidity {
delta_s,
delta_a,
delta_b,
}
}
2 => {
let lpt = rest
.get(..8)
.and_then(|slice| slice.try_into().ok())
.map(u64::from_le_bytes)
.ok_or(AppError::InvalidInstruction)?;
Self::RemoveLiquidity { lpt }
}
3 => {
let amount = rest
.get(..8)
.and_then(|slice| slice.try_into().ok())
.map(u64::from_le_bytes)
.ok_or(AppError::InvalidInstruction)?;
let limit = rest
.get(8..16)
.and_then(|slice| slice.try_into().ok())
.map(u64::from_le_bytes)
.ok_or(AppError::InvalidInstruction)?;
Self::Swap { amount, limit }
}
4 => Self::FreezePool,
5 => Self::ThawPool,
6 => {
let amount = rest
.get(..8)
.and_then(|slice| slice.try_into().ok())
.map(u64::from_le_bytes)
.ok_or(AppError::InvalidInstruction)?;
Self::Earn { amount }
}
7 => Self::TransferPoolOwnership,
_ => return Err(AppError::InvalidInstruction.into()),
})
}
}
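
// A minimal sketch (not part of the original file) of a round-trip check for
// the byte layout decoded above: tag byte 3 followed by two little-endian
// u64 values (`amount`, then `limit`).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn unpack_swap_instruction() {
        // Build the wire format by hand: tag, then two u64s.
        let mut data = vec![3u8];
        data.extend_from_slice(&100u64.to_le_bytes());
        data.extend_from_slice(&95u64.to_le_bytes());
        assert_eq!(
            AppInstruction::unpack(&data).unwrap(),
            AppInstruction::Swap {
                amount: 100,
                limit: 95
            }
        );
    }
}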

use crate::onnx::{ModelProto, NodeProto, TensorProto, ValueInfoProto};
use crate::utils::{DataTypeError, ScalarType, Shape};
use std::borrow::Cow;
use std::fmt::Debug;
use std::hash::Hash;
use std::ptr;
use std::{collections::HashMap, sync::Arc};
use thiserror::Error;
#[derive(Clone)]
pub struct OperatorDefinition<'model> {
pub(crate) proto: Cow<'model, NodeProto>,
pub(crate) output_shapes: Vec<Shape>,
}
impl<'model> OperatorDefinition<'model> {
pub fn from(
node: Cow<'model, NodeProto>,
value_shapes: &HashMap<&'model str, Shape>,
) -> Result<OperatorDefinition<'model>, IrError> {
let mut output_shapes: Vec<Shape> = Vec::with_capacity(node.get_output().len());
for output_name in node.get_output() {
if !value_shapes.contains_key(output_name.as_str()) {
return Err(IrError::OutputNodeNotFound(output_name.to_string()));
}
output_shapes.push(value_shapes[&output_name.as_str()].clone());
}
Ok(OperatorDefinition {
proto: node,
output_shapes,
})
}
}
#[derive(Clone)]
pub enum NodeDefinition<'model> {
Operator(Box<OperatorDefinition<'model>>),
Tensor(Box<Cow<'model, TensorProto>>),
Input(&'model ValueInfoProto),
Outputs { names: Vec<String> },
Missing, // A missing input (optional)
}
static MISSING_OPTIONAL_INPUT: NodeDefinition<'static> = NodeDefinition::Missing;
#[derive(Clone)]
pub struct Input<'model> {
pub source_node: Arc<Node<'model>>,
pub output_index: usize,
}
pub struct Node<'model> {
pub definition: NodeDefinition<'model>,
pub inputs: Vec<Input<'model>>,
}
#[derive(Debug, Error)]
pub enum IrError {
#[error("output node for output {0} not found")]
OutputNodeNotFound(String),
#[error("could not find node corresponding to input {input_name} of node {target_node_name}")]
InputNodeNotFound {
target_node_name: String,
input_name: String,
},
#[error("issue with data types: {0}")]
Type(#[from] DataTypeError),
}
impl<'m> NodeDefinition<'m> {
pub fn get_name(&self) -> Cow<'_, str> {
match self {
NodeDefinition::Operator(op_def) => Cow::from(op_def.proto.get_name()),
NodeDefinition::Tensor(t) => Cow::from(t.get_name()),
NodeDefinition::Input(i) => Cow::from(i.get_name()),
NodeDefinition::Outputs { .. } => Cow::from(" "),
NodeDefinition::Missing => Cow::from(""),
}
}
pub fn output_name(&self, output_index: usize) -> Cow<'_, str> {
match self {
NodeDefinition::Operator(op_def) => {
Cow::Borrowed(&op_def.proto.get_output()[output_index])
}
NodeDefinition::Tensor(proto) => Cow::from(proto.get_name()),
NodeDefinition::Input(proto) => Cow::from(proto.get_name()),
NodeDefinition::Outputs { .. } => panic!("can't get output name for outputs node"),
NodeDefinition::Missing => panic!("can't get output name for missing node"),
}
}
}
impl<'model> Node<'model> {
pub fn new(variant: NodeDefinition<'model>) -> Node<'model> {
Node {
definition: variant,
inputs: vec![],
}
}
pub fn definition(&self) -> &NodeDefinition<'model> {
&self.definition
}
/// Construct part of the intermediate representation tree for the indicated node.
pub fn from_node<'a>(
model: &'model ModelProto,
node: Cow<'model, NodeProto>,
value_shapes: &HashMap<&'model str, Shape>,
node_definitions_by_output: &'a HashMap<String, NodeDefinition<'model>>,
nodes_by_name: &mut HashMap<String, Arc<Node<'model>>>,
) -> Result<Arc<Node<'model>>, IrError> {
let node_name = node.get_name();
// Did we already translate this node before?
if nodes_by_name.contains_key(node_name) {
let n = nodes_by_name.get(node_name).unwrap();
return Ok(n.clone());
}
let inputs: Result<Vec<Input<'model>>, IrError> = node
.get_input()
.iter()
.map(|input_name: &'model String| {
let source_node_definition = node_definitions_by_output
.get(input_name)
.unwrap_or(&MISSING_OPTIONAL_INPUT);
Ok(match source_node_definition {
// The source is another op - continue translating that node
NodeDefinition::Operator(source_node_proto) => Input {
source_node: Node::from_node(
model,
source_node_proto.proto.clone(),
value_shapes,
node_definitions_by_output,
nodes_by_name,
)?,
output_index: source_node_proto
.proto
.get_output()
.iter()
.position(|s| s == input_name)
.ok_or_else(|| IrError::OutputNodeNotFound(input_name.to_string()))?,
},
_ => {
                        // The source is an initializer or model input
let source_name = source_node_definition.get_name();
Input {
output_index: 0,
// Did we already translate this node?
source_node: match nodes_by_name.get(&source_name.to_string()) {
Some(node) => node.clone(),
None => {
let node = Arc::new(Node::new(source_node_definition.clone()));
nodes_by_name.insert(source_name.into(), node.clone());
node
}
},
}
}
})
})
.collect();
let translated = Arc::new(Node {
definition: NodeDefinition::Operator(Box::new(OperatorDefinition::from(
node.clone(),
value_shapes,
)?)),
inputs: inputs?,
});
nodes_by_name.insert(node_name.to_string(), translated.clone());
Ok(translated)
}
/// Construct an intermediate representation graph for calculating the output with the specified name.
pub fn from_model(
model: &'model ModelProto,
outputs: Option<&[String]>,
) -> Result<Arc<Node<'model>>, IrError> {
// Collect value shapes
let mut value_shapes: HashMap<&'model str, Shape> = HashMap::new();
for vi in model.get_graph().get_value_info() {
value_shapes.insert(vi.get_name(), vi.get_shape()?);
}
for vi in model.get_graph().get_output() {
let output_name = vi.get_name();
if !output_name.is_empty() {
value_shapes.insert(output_name, vi.get_shape()?);
}
}
        // Index node definitions by output name
let mut node_definitions_by_output = HashMap::<String, NodeDefinition<'model>>::new();
for node in model.get_graph().get_node().iter() {
let node_def = NodeDefinition::Operator(Box::new(OperatorDefinition::from(
Cow::Borrowed(node),
&value_shapes,
)?));
for output in node.get_output() {
if !output.is_empty() {
node_definitions_by_output.insert(output.to_string(), node_def.clone());
}
}
}
        // Collect initializer info
for initializer in model.get_graph().get_initializer().iter() {
log::info!("Initializer {}", initializer.get_name());
node_definitions_by_output.insert(
initializer.get_name().to_string(),
NodeDefinition::Tensor(Box::new(Cow::Borrowed(initializer))),
);
}
let output_names: Vec<String> = match outputs {
Some(outputs) => outputs.to_vec(),
None => model
.get_graph()
.get_output()
.iter()
.map(|x| x.get_name().to_string())
.collect(),
};
        // Collect input names
for input in model.get_graph().get_input().iter() {
if !node_definitions_by_output.contains_key(input.get_name()) {
log::info!("Input {}", input.get_name());
node_definitions_by_output
.insert(input.get_name().to_string(), NodeDefinition::Input(input));
} else {
log::info!(
"Skipping input definition {}: already defined",
input.get_name()
);
}
}
let mut nodes_by_name = HashMap::new();
let output_nodes: Result<Vec<Input<'model>>, IrError> = output_names
.iter()
.map(|output_name| {
let output_node = model
.get_graph()
.get_node()
.iter()
.find(|x| -> bool { x.get_output().contains(output_name) })
.ok_or_else(|| IrError::OutputNodeNotFound(output_name.clone()))?;
let source_node = Node::<'model>::from_node(
model,
Cow::Borrowed(output_node),
&value_shapes,
&node_definitions_by_output,
&mut nodes_by_name,
)?;
let output_index = output_node
.get_output()
.iter()
.position(|s| s == output_name)
.ok_or_else(|| IrError::OutputNodeNotFound(output_name.clone()))?;
Ok(Input {
source_node,
output_index,
})
})
.collect();
Ok(Arc::new(Node {
definition: NodeDefinition::Outputs {
names: output_names,
},
inputs: output_nodes?,
}))
}
pub fn output_shape(&self, output_index: usize) -> Result<Shape, IrError> {
Ok(match (&self.definition, output_index) {
(NodeDefinition::Operator(op_def), index) => op_def.output_shapes[index].clone(),
(NodeDefinition::Tensor(tensor_proto), 0) => Shape::from(
ScalarType::from_i32(tensor_proto.get_data_type())?,
tensor_proto.get_dims(),
),
(NodeDefinition::Input(input_proto), 0) => input_proto.get_shape()?,
(NodeDefinition::Outputs { .. }, _) => panic!("output node has no outputs!"),
(_, _) => panic!("node has no output at index {}", output_index),
})
}
}
impl<'model> Debug for NodeDefinition<'model> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
NodeDefinition::Operator(def) => {
write!(
f,
"op: {} ({})",
def.proto.get_name(),
def.proto.get_op_type()
)
}
NodeDefinition::Tensor(def) => write!(f, "tensor {}", def.get_name()),
NodeDefinition::Input(def) => write!(f, "input {}", def.get_name()),
NodeDefinition::Outputs { .. } => write!(f, "outputs"),
NodeDefinition::Missing => write!(f, "missing (optional)"),
}
}
}
/// Wrap an Arc<Node> in a struct so we can implement pointer-based comparison for it, and use them as keys in a HashSet/HashMap
#[derive(Clone)]
pub struct NodeIdentifier<'model>(Arc<Node<'model>>);
impl<'model> Hash for NodeIdentifier<'model> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
ptr::hash(Arc::as_ptr(&self.0), state)
}
}
impl<'model> PartialEq for NodeIdentifier<'model> {
fn eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.0, &other.0)
}
}
impl<'model> Eq for NodeIdentifier<'model> {}
impl<'model> Node<'model> {
pub fn identifier(self: &Arc<Self>) -> NodeIdentifier<'model> {
NodeIdentifier(self.clone())
}
}
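
// A small sanity check (not in the original file) for the pointer-based
// identity above: two handles to the same `Node` compare equal, while
// distinct nodes with identical definitions do not.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn node_identifier_uses_pointer_identity() {
        let a = Arc::new(Node::new(NodeDefinition::Missing));
        let b = Arc::new(Node::new(NodeDefinition::Missing));
        // Cloning the Arc yields the same allocation, hence the same identifier.
        assert!(a.identifier() == a.clone().identifier());
        assert!(a.identifier() != b.identifier());
    }
}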

use bitmex::rest::BitMEXRest;
use bitmex::rest::GetLiquidationRequest;
use failure::Fallible;
use log::debug;
use std::env::var;
use tokio::runtime::Runtime;
#[test]
fn get_liquidation() -> Fallible<()> {
let _ = dotenv::dotenv();
let _ = env_logger::try_init();
let rt = Runtime::new()?;
let bm = BitMEXRest::with_credential(&var("BITMEX_KEY")?, &var("BITMEX_SECRET")?);
let fut = bm.request(GetLiquidationRequest {
..Default::default()
});
debug!("{:?}", rt.block_on(fut)?);
Ok(())
}

// iterators3.rs
// This is a bigger exercise than most of the others! You can do it!
// Here is your mission, should you choose to accept it:
// 1. Complete the divide function to get the first four tests to pass.
// 2. Get the remaining tests to pass by completing the result_with_list and
// list_of_results functions.
// Execute `rustlings hint iterators3` to get some hints!
#[derive(Debug, PartialEq, Eq)]
pub enum DivisionError {
NotDivisible(NotDivisibleError),
DivideByZero,
}
#[derive(Debug, PartialEq, Eq)]
pub struct NotDivisibleError {
dividend: i32,
divisor: i32,
}
// Calculate `a` divided by `b` if `a` is evenly divisible by `b`.
// Otherwise, return a suitable error.
pub fn divide(a: i32, b: i32) -> Result<i32, DivisionError> {
if b == 0 {
return Err(DivisionError::DivideByZero);
}
if a % b != 0 {
return Err(DivisionError::NotDivisible(NotDivisibleError {
dividend: a,
divisor: b
}));
}
Ok(a / b)
}
// Complete the function and return a value of the correct type so the test passes.
// Desired output: Ok([1, 11, 1426, 3])
fn result_with_list() -> Result<Vec<i32>, DivisionError> {
let numbers = vec![27, 297, 38502, 81];
    // Collecting an iterator of `Result`s into `Result<Vec<_>, _>` propagates
    // the first error instead of silently discarding failed divisions.
    numbers.into_iter().map(|n| divide(n, 27)).collect()
}
// Complete the function and return a value of the correct type so the test passes.
// Desired output: [Ok(1), Ok(11), Ok(1426), Ok(3)]
fn list_of_results() -> Vec<Result<i32, DivisionError>> {
let numbers = vec![27, 297, 38502, 81];
let division_results: Vec<Result<i32, DivisionError>> = numbers.into_iter()
.map(|n| divide(n, 27))
.collect();
division_results
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_success() {
assert_eq!(divide(81, 9), Ok(9));
}
#[test]
fn test_not_divisible() {
assert_eq!(
divide(81, 6),
Err(DivisionError::NotDivisible(NotDivisibleError {
dividend: 81,
divisor: 6
}))
);
}
#[test]
fn test_divide_by_0() {
assert_eq!(divide(81, 0), Err(DivisionError::DivideByZero));
}
#[test]
fn test_divide_0_by_something() {
assert_eq!(divide(0, 81), Ok(0));
}
#[test]
fn test_result_with_list() {
assert_eq!(format!("{:?}", result_with_list()), "Ok([1, 11, 1426, 3])");
}
#[test]
fn test_list_of_results() {
assert_eq!(
format!("{:?}", list_of_results()),
"[Ok(1), Ok(11), Ok(1426), Ok(3)]"
);
}
}

use glutin::event::{ElementState, Event, KeyEvent, WindowEvent};
use glutin::keyboard::Key;
use winit::platform::modifier_supplement::KeyEventExtModifierSupplement;
use crate::bridge::UiCommand;
use crate::channel_utils::LoggingTx;
pub struct KeyboardManager {
command_sender: LoggingTx<UiCommand>,
shift: bool,
ctrl: bool,
alt: bool,
logo: bool,
ignore_input_this_frame: bool,
queued_key_events: Vec<KeyEvent>,
}
impl KeyboardManager {
pub fn new(command_sender: LoggingTx<UiCommand>) -> KeyboardManager {
KeyboardManager {
command_sender,
shift: false,
ctrl: false,
alt: false,
logo: false,
ignore_input_this_frame: false,
queued_key_events: Vec::new(),
}
}
pub fn handle_event(&mut self, event: &Event<()>) {
match event {
Event::WindowEvent {
event: WindowEvent::Focused(focused),
..
} => {
                // If the window was just focused, ignore the keyboard events
                // that were submitted during this frame.
self.ignore_input_this_frame = *focused;
}
Event::WindowEvent {
event:
WindowEvent::KeyboardInput {
event: key_event, ..
},
..
} => {
// Store the event so that we can ignore it properly if the window was just
// focused.
self.queued_key_events.push(key_event.clone());
}
Event::WindowEvent {
event: WindowEvent::ModifiersChanged(modifiers),
..
} => {
                // Record the modifier states so that we can properly add them
                // to the keybinding text.
self.shift = modifiers.shift_key();
self.ctrl = modifiers.control_key();
self.alt = modifiers.alt_key();
self.logo = modifiers.super_key();
}
Event::MainEventsCleared => {
            // Only process queued input if the window wasn't just focused.
if !self.ignore_input_this_frame {
                // For each keyboard event received this frame
for key_event in self.queued_key_events.iter() {
// And a key was pressed
if key_event.state == ElementState::Pressed {
if let Some(keybinding) = self.maybe_get_keybinding(key_event) {
self.command_sender
.send(UiCommand::Keyboard(keybinding))
.expect("Could not send keyboard ui command");
}
}
}
}
            // Regardless of whether this was valid keyboard input or not,
            // reset the ignore flag and clear whatever events were queued.
self.ignore_input_this_frame = false;
self.queued_key_events.clear();
}
_ => {}
}
}
fn maybe_get_keybinding(&self, key_event: &KeyEvent) -> Option<String> {
// Determine if this key event represents a key which won't ever
// present text.
if let Some(key_text) = is_control_key(key_event.logical_key) {
Some(self.format_keybinding_string(true, true, key_text))
} else {
let is_dead_key =
key_event.text_with_all_modifiers().is_some() && key_event.text.is_none();
let key_text = if (self.alt || is_dead_key) && cfg!(target_os = "macos") {
key_event.text_with_all_modifiers()
} else {
key_event.text
};
if let Some(key_text) = key_text {
// This is not a control key, so we rely upon winit to determine if
// this is a deadkey or not.
let keybinding_string = if let Some(escaped_text) = is_special(key_text) {
self.format_keybinding_string(true, false, escaped_text)
} else {
self.format_keybinding_string(false, false, key_text)
};
Some(keybinding_string)
} else {
None
}
}
}
fn format_keybinding_string(&self, special: bool, use_shift: bool, text: &str) -> String {
let special = special || self.ctrl || use_alt(self.alt) || use_logo(self.logo);
let open = or_empty(special, "<");
let shift = or_empty(self.shift && use_shift, "S-");
let ctrl = or_empty(self.ctrl, "C-");
let alt = or_empty(use_alt(self.alt), "M-");
let logo = or_empty(use_logo(self.logo), "D-");
let close = or_empty(special, ">");
format!("{}{}{}{}{}{}{}", open, shift, ctrl, alt, logo, text, close)
}
}
#[cfg(not(target_os = "windows"))]
fn use_logo(logo: bool) -> bool {
logo
}
// The Windows key is used for OS-level shortcuts,
// so we want to ignore the logo key on this platform.
#[cfg(target_os = "windows")]
fn use_logo(_: bool) -> bool {
false
}
#[cfg(not(target_os = "macos"))]
fn use_alt(alt: bool) -> bool {
alt
}
// The option or alt key is used on Macos for character set changes
// and does not operate the same as other systems.
#[cfg(target_os = "macos")]
fn use_alt(_: bool) -> bool {
false
}
fn or_empty(condition: bool, text: &str) -> &str {
if condition {
text
} else {
""
}
}
fn is_control_key(key: Key<'static>) -> Option<&str> {
match key {
Key::Backspace => Some("BS"),
Key::Escape => Some("Esc"),
Key::Delete => Some("Del"),
Key::ArrowUp => Some("Up"),
Key::ArrowDown => Some("Down"),
Key::ArrowLeft => Some("Left"),
Key::ArrowRight => Some("Right"),
Key::F1 => Some("F1"),
Key::F2 => Some("F2"),
Key::F3 => Some("F3"),
Key::F4 => Some("F4"),
Key::F5 => Some("F5"),
Key::F6 => Some("F6"),
Key::F7 => Some("F7"),
Key::F8 => Some("F8"),
Key::F9 => Some("F9"),
Key::F10 => Some("F10"),
Key::F11 => Some("F11"),
Key::F12 => Some("F12"),
Key::Insert => Some("Insert"),
Key::Home => Some("Home"),
Key::End => Some("End"),
Key::PageUp => Some("PageUp"),
Key::PageDown => Some("PageDown"),
_ => None,
}
}
fn is_special(text: &str) -> Option<&str> {
match text {
" " => Some("Space"),
"<" => Some("lt"),
"\\" => Some("Bslash"),
"|" => Some("Bar"),
"\t" => Some("Tab"),
"\n" => Some("CR"),
_ => None,
}
}
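
// A quick illustrative check (not part of the original file) that the
// escaping table above maps text to Vim-style key notation.
#[cfg(test)]
mod tests {
    use super::is_special;

    #[test]
    fn special_text_is_escaped() {
        assert_eq!(is_special(" "), Some("Space"));
        assert_eq!(is_special("<"), Some("lt"));
        assert_eq!(is_special("a"), None);
    }
}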

use makepad_live_compiler::analyse::ShaderCompileOptions;
use makepad_live_compiler::livetypes::{Geometry,live_str_to_id};
use makepad_live_compiler::livestyles::{LiveStyles, LiveBody,};
struct Cx {
live_styles: LiveStyles,
}
impl Cx {
pub fn add_live_body(&mut self, live_body: LiveBody) {
let mut shader_alloc_start = 0;
if let Err(err) = self.live_styles.add_live_body(live_body, &mut shader_alloc_start) {
eprintln!("{:?}", err);
}
}
}
macro_rules!live {
( $ cx: ident, $ code: literal) => {
$ cx.add_live_body(LiveBody {
            file: file!().to_string().replace("\\", "/"),
module_path: module_path!().to_string(),
line: line!() as usize,
column: column!() as usize,
code: $ code.to_string()
})
}
}
#[macro_export]
macro_rules!live_id {
( $ path: path) => {
live_str_to_id(module_path!(), stringify!( $ path))
}
}
fn main() {
let mut cx = Cx {live_styles: LiveStyles::default()};
live!(cx, r#"
self::anim_default: Anim {
play: Cut {duration: 0.1},
tracks:[
Float{keys:{0.0: 0.0, 1.0: 1.0}}
]
}
self::my_walk: Walk {
width: Fix(10.),
height: Fix(10.),
margin: {l: -4., t: 0., r: 4., b: 0.}
}
self::my_layout: Layout {
align: all(0.5),
walk: {
width: Compute,
height: Compute,
margin: all(1.0),
},
padding: {l: 16.0, t: 12.0, r: 16.0, b: 12.0},
}
self::text_style_unscaled: TextStyle {
font: "resources/Ubuntu-R.ttf",
font_size: 8.0,
brightness: 1.0,
curve: 0.6,
line_spacing: 1.4,
top_drop: 1.2,
height_factor: 1.3,
}
self::mycolor: #ff0f;
self::mycolor2: self::mycolor;
self::myslider: 1.0;
render::quad::shader: ShaderLib {
struct Mp {
x: float
}
impl Mp {
fn myfn(inout self) {
}
}
fn vertex() -> vec4 {
return vec4(0., 0., 0., 1.);
}
fn pixel() -> vec4 {
return vec4(1.0, 0.0, 0.0, 1.0);
}
}
self::shader_bg: Shader {
default_geometry: self::mygeom;
geometry mygeom: vec2;
instance myinst: vec2;
use render::quad::shader::*;
fn pixel() -> vec4 {
let v: Mp;
v.myfn();
let x = self::myslider;
let y = self::mycolor;
return vec4(0., 0., 0., 1.);
}
}
"#);
cx.live_styles.geometries.insert(
live_id!(self::mygeom),
Geometry{geometry_id:0}
);
let options = ShaderCompileOptions {
gather_all: false,
create_const_table: false,
no_const_collapse: false
};
cx.live_styles.enumerate_all_shaders( | shader_ast | {
match cx.live_styles.collect_and_analyse_shader_ast(&shader_ast, options) {
Err(err) => {
eprintln!("{}", err);
panic!()
},
Ok(_) => {
println!("OK!");
}
}
})
}

use {
crate::cle::CreoleLiveEditor,
yew::{
prelude::*,
services::storage::{Area, StorageService},
},
};
pub struct App{
// link: ComponentLink<Self>,
saved_value: String,
}
pub enum Msg{
}
impl Component for App {
type Message = Msg;
type Properties = ();
fn create(_: Self::Properties, _link: ComponentLink<Self>) -> Self {
// web_sys::window().unwrap().navigator().service_worker().register("sw.js");
let storage = StorageService::new(Area::Local).unwrap();
let key = CreoleLiveEditor::get_save_key("editor1");
let saved_value = match storage.restore(&key) {
Ok(v) => v,
_ => String::new()
};
Self { /* link, */saved_value, }
}
fn change(&mut self, _: Self::Properties) -> ShouldRender {
false
}
fn update(&mut self, _: Self::Message) -> ShouldRender {
false
}
fn view(&self) -> Html {
        let value = if self.saved_value.is_empty() {"== WASM Creole Live editor
----
=== headings
== h1
=== h2
==== h3
----
=== text styles
//italic// and **bold**.
linebreak1\\\\linebreak2
----
=== unordered list
* a
** b
*** c
----
=== ordered list
# a
## b
### c
----
=== images
{{https://www.w3schools.com/html/w3schools.jpg}}
{{https://www.w3schools.com/html/w3schools.jpg|w3schools}}
----
=== links
[[https://www.w3schools.com/]]
[[javascript:alert('hi')|alert me \"hi\"]]
[[/|reload to test autosave]]"} else { &self.saved_value };
html! { <>
<div class="wrapper">
<h2>{"WASM Creole Live Editor example"}</h2>
<CreoleLiveEditor name="editor1" value=value />
</div>
<div class="wrapper">
<h2>{"Preview-only mode (last saved content of editor above)"}</h2>
<CreoleLiveEditor name="editor1" editable=false />
</div>
</>}
}
}

#[macro_use]
extern crate clap;
extern crate rayon;
extern crate rand;
extern crate primal;
extern crate image;
extern crate num;
mod backends;
mod checkers;
mod extra_ops;
use std::error::Error as StdError;
use std::str;
use std::iter;
use std::fs::File;
use std::io::Write;
use rayon::prelude::*;
use num::traits::Num;
use num::integer::Integer;
use image::{GenericImage, Pixel, FilterType};
use backends::BigInt;
use extra_ops::ToStrRadix;
use checkers::PrimeChecker;
use checkers::SieveChecker;
use checkers::MillerRabinChecker;
type Error = Box<StdError>;
const MAX_ITERATIONS: usize = 1 << 32;
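/// Scans the odd candidates `n`, `n + 2`, `n + 4`, … in parallel (where `n`
/// is `from` rounded up to an odd number) and returns the first candidate
/// accepted by `checker`.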
pub fn next_prime<N, C>(from: N, checker: &C) -> N
where N: Num + Integer + Clone + From<usize> + Send + Sync,
C: PrimeChecker<Value=N> + Sync
{
let n = if from.is_even() {
from + N::one()
} else {
from
};
(0..MAX_ITERATIONS).into_par_iter()
.map(|i| {
n.clone() + N::from(i * 2)
})
.find_any(|n| {
checker.check(n)
})
.expect("Could not find prime number")
}
fn run() -> Result<(), Error> {
let matches = clap_app!(primify =>
(version: "0.1")
(author: "0xd34d10cc")
(about: "Generate prime number that in binary form looks like input image")
(@arg INPUT: -i --input +takes_value +required "Input file name")
(@arg OUTPUT: -o --output +takes_value "Output file name")
(@arg ITER: -n --witnesses +takes_value "Number of witnesses")
(@arg SIEVE: -s --sieve +takes_value "Sieve upper bound")
(@arg MAX_WIDTH: -w --width +takes_value "Max image width")
        (@arg MAX_HEIGHT: -h --height +takes_value "Max image height")
(@arg EDGE: -e --edge +takes_value "The color edge between 0 and 1 (0-255)")
).get_matches();
let input = matches.value_of("INPUT").unwrap();
let output = matches.value_of("OUTPUT").unwrap_or("output.txt");
let edge = matches.value_of("EDGE")
.map(|e| e.parse())
.unwrap_or(Ok(128))?;
let width: u32 = matches.value_of("MAX_WIDTH")
.map(|w| w.parse())
.unwrap_or(Ok(100))?;
    let height: u32 = matches.value_of("MAX_HEIGHT")
.map(|h| h.parse())
.unwrap_or(Ok(100))?;
let sieve_size = matches.value_of("SIEVE")
.map(|s| s.parse())
.unwrap_or(Ok(8192))?;
let witnessess = matches.value_of("ITER")
.map(|i| i.parse())
.unwrap_or(Ok(25))?;
let image = image::open(input)?
.resize(width, height, FilterType::Lanczos3)
.grayscale();
let image = image.resize_exact(image.width() * 2, image.height(), FilterType::Lanczos3);
let width = image.width() as usize;
let height = image.height() as usize;
println!("Result image size is {}x{}", width, height);
let npixels = width * height;
let mut ascii_image = String::with_capacity(npixels + height);
for (_, _, pixel) in image.pixels() {
let (r, g, b, _) = pixel.to_rgb().channels4();
let (r, g, b) = (r as u32, g as u32, b as u32);
if r + g + b <= edge * 3 {
ascii_image.push('1');
} else {
ascii_image.push('0');
}
}
let nzeros = ascii_image.chars()
.take_while(|&c| c == '0')
.count();
let num = BigInt::from_str_radix(&ascii_image[nzeros..], 2)?;
println!("Number is {} bits long", npixels - nzeros);
let checker = {
let sieve = SieveChecker::new(sieve_size);
let miller_rabin = MillerRabinChecker::new(witnessess);
sieve.combine(miller_rabin)
};
let next_prime = next_prime(num, &checker);
let bin = {
let mut bin = String::with_capacity(npixels);
bin.extend(iter::repeat('0').take(nzeros));
let prime = next_prime.to_str(2u8);
bin.push_str(&prime);
bin
};
ascii_image.clear();
for line in bin.as_bytes().chunks(width) {
let line = unsafe { str::from_utf8_unchecked(line) };
ascii_image.push_str(line);
ascii_image.push('\n');
}
let mut out = File::create(output)?;
out.write_all(ascii_image.as_bytes())?;
Ok(())
}
fn main() {
if let Err(e) = run() {
println!("An error occurred: {}", e);
}
}

#[macro_use]
pub mod layouts;
mod events;
mod handler;
mod ui_commands;
use std::sync::Arc;
use std::process::Stdio;
use rmpv::Value;
use nvim_rs::{create::tokio as create, UiAttachOptions};
use tokio::runtime::Runtime;
use tokio::process::Command;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use log::{info, error, trace};
pub use events::*;
pub use layouts::*;
use crate::settings::*;
pub use ui_commands::UiCommand;
use handler::NeovimHandler;
use crate::error_handling::ResultPanicExplanation;
use crate::INITIAL_DIMENSIONS;
lazy_static! {
pub static ref BRIDGE: Bridge = Bridge::new();
}
#[cfg(target_os = "windows")]
fn set_windows_creation_flags(cmd: &mut Command) {
cmd.creation_flags(0x08000000); // CREATE_NO_WINDOW
}
fn create_nvim_command() -> Command {
let mut cmd = Command::new("nvim");
cmd.arg("--embed")
.args(SETTINGS.neovim_arguments.iter().skip(1))
.stderr(Stdio::inherit());
#[cfg(target_os = "windows")]
set_windows_creation_flags(&mut cmd);
cmd
}
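/// Waits for at least one queued `UiCommand`, then drains any others that are
/// already pending, so commands arriving close together are handled as a batch.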
async fn drain(receiver: &mut UnboundedReceiver<UiCommand>) -> Option<Vec<UiCommand>> {
if let Some(ui_command) = receiver.recv().await {
let mut results = vec![ui_command];
while let Ok(ui_command) = receiver.try_recv() {
results.push(ui_command);
}
Some(results)
} else {
None
}
}
async fn start_process(mut receiver: UnboundedReceiver<UiCommand>) {
let (width, height) = INITIAL_DIMENSIONS;
let (mut nvim, io_handler, _) = create::new_child_cmd(&mut create_nvim_command(), NeovimHandler()).await
.unwrap_or_explained_panic("Could not locate or start the neovim process");
tokio::spawn(async move {
info!("Close watcher started");
match io_handler.await {
Err(join_error) => error!("Error joining IO loop: '{}'", join_error),
Ok(Err(error)) => {
if !error.is_channel_closed() {
error!("Error: '{}'", error);
}
},
Ok(Ok(())) => {}
};
std::process::exit(0);
});
if let Ok(Value::Integer(correct_version)) = nvim.eval("has(\"nvim-0.4\")").await {
if correct_version.as_i64() != Some(1) {
error!("Neovide requires version 0.4 or higher");
std::process::exit(0);
}
} else {
error!("Neovide requires version 0.4 or higher");
std::process::exit(0);
};
nvim.set_var("neovide", Value::Boolean(true)).await
.unwrap_or_explained_panic("Could not communicate with neovim process");
let mut options = UiAttachOptions::new();
options.set_linegrid_external(true);
options.set_rgb(true);
nvim.ui_attach(width as i64, height as i64, &options).await
.unwrap_or_explained_panic("Could not attach ui to neovim process");
if let Err(command_error) = nvim.command("runtime! ginit.vim").await {
nvim.command(&format!("echomsg \"error encountered in ginit.vim {:?}\"", command_error)).await.ok();
}
info!("Neovim process attached");
let nvim = Arc::new(nvim);
let input_nvim = nvim.clone();
tokio::spawn(async move {
info!("UiCommand processor started");
while let Some(commands) = drain(&mut receiver).await {
let (resize_list, other_commands): (Vec<UiCommand>, Vec<UiCommand>) = commands
.into_iter()
.partition(|command| command.is_resize());
for command in resize_list
.into_iter().last().into_iter()
.chain(other_commands.into_iter()) {
let input_nvim = input_nvim.clone();
tokio::spawn(async move {
trace!("Executing UiCommand: {:?}", &command);
command.execute(&input_nvim).await;
});
}
}
});
SETTINGS.read_initial_values(&nvim).await;
SETTINGS.setup_changed_listeners(&nvim).await;
nvim.set_option("lazyredraw", Value::Boolean(false)).await
.ok();
}
pub struct Bridge {
_runtime: Runtime, // Necessary to keep runtime running
sender: UnboundedSender<UiCommand>
}
impl Bridge {
pub fn new() -> Bridge {
let runtime = Runtime::new().unwrap();
let (sender, receiver) = unbounded_channel::<UiCommand>();
runtime.spawn(async move {
start_process(receiver).await;
});
Bridge { _runtime: runtime, sender }
}
pub fn queue_command(&self, command: UiCommand) {
trace!("UiCommand queued: {:?}", &command);
self.sender.send(command)
.unwrap_or_explained_panic(
"Could not send UI command from the window system to the neovim process.");
}
}

// iterators2.rs
// In this exercise, you'll learn some of the unique advantages that iterators
// can offer. Follow the steps to complete the exercise.
// As always, there are hints if you execute `rustlings hint iterators2`!
// Step 1.
// Complete the `capitalize_first` function.
// "hello" -> "Hello"
pub fn capitalize_first(input: &str) -> String {
let mut c = input.chars();
match c.next() {
None => String::new(),
Some(first) => {
let mut s = first.to_uppercase().to_string();
s.extend(c);
s
}
}
}
// Step 2.
// Apply the `capitalize_first` function to a slice of string slices.
// Return a vector of strings.
// ["hello", "world"] -> ["Hello", "World"]
pub fn capitalize_words_vector(words: &[&str]) -> Vec<String> {
words.into_iter().copied().map(capitalize_first).collect()
}
// Step 3.
// Apply the `capitalize_first` function again to a slice of string slices.
// Return a single string.
// ["hello", " ", "world"] -> "Hello World"
pub fn capitalize_words_string(words: &[&str]) -> String {
let mut s = String::new();
s.extend(words.into_iter().copied().map(capitalize_first));
s
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_success() {
assert_eq!(capitalize_first("hello"), "Hello");
}
#[test]
fn test_empty() {
assert_eq!(capitalize_first(""), "");
}
#[test]
fn test_iterate_string_vec() {
let words = vec!["hello", "world"];
assert_eq!(capitalize_words_vector(&words), ["Hello", "World"]);
}
#[test]
fn test_iterate_into_string() {
let words = vec!["hello", " ", "world"];
assert_eq!(capitalize_words_string(&words), "Hello World");
}
}

use crate::Unit;
pub struct Shop {
units: Vec<Box<dyn Unit>>,
}
impl Unit for Shop {
fn calculate_price(&self) -> f64 {
self.units.iter().map(|elem| elem.calculate_price()).sum()
}
}
impl Shop {
pub fn add(&mut self, unit: Box<dyn Unit>) {
self.units.push(unit)
}
}
impl Default for Shop {
fn default() -> Self {
Self { units: vec![] }
}
}
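
// A minimal usage sketch (not in the original file). `Item` is a hypothetical
// leaf implementor; this assumes the crate-level `Unit` trait only requires
// `calculate_price`.
#[cfg(test)]
mod tests {
    use super::*;

    struct Item(f64);

    impl Unit for Item {
        fn calculate_price(&self) -> f64 {
            self.0
        }
    }

    #[test]
    fn shop_sums_unit_prices() {
        let mut shop = Shop::default();
        shop.add(Box::new(Item(1.5)));
        shop.add(Box::new(Item(2.5)));
        assert_eq!(shop.calculate_price(), 4.0);
    }
}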

// Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
counters::TXPOOL_SERVICE_HISTOGRAM,
pool,
pool::{
PendingOrdering, PendingSettings, PoolTransaction, PrioritizationStrategy, Status,
TxStatus, UnverifiedUserTransaction, VerifiedTransaction,
},
pool_client::{NonceCache, PoolClient},
};
use crate::pool::TransactionQueue;
use anyhow::Result;
use crypto::hash::HashValue;
use futures_channel::mpsc;
use parking_lot::RwLock;
use starcoin_config::NodeConfig;
use starcoin_txpool_api::{TxPoolStatus, TxPoolSyncService};
use std::sync::Arc;
use storage::Store;
use types::{
account_address::AccountAddress,
block::{Block, BlockHeader},
transaction,
transaction::SignedUserTransaction,
};
#[derive(Clone, Debug)]
pub struct TxPoolService {
inner: Inner,
}
impl TxPoolService {
pub fn new(
node_config: Arc<NodeConfig>,
storage: Arc<dyn Store>,
chain_header: BlockHeader,
) -> Self {
let pool_config = &node_config.tx_pool;
let verifier_options = pool::VerifierOptions {
no_early_reject: false,
};
let queue = TxnQueue::new(
tx_pool::Options {
max_count: pool_config.max_count as usize,
max_mem_usage: pool_config.max_mem_usage as usize,
max_per_sender: pool_config.max_per_sender as usize,
},
verifier_options,
PrioritizationStrategy::GasPriceOnly,
);
let queue = Arc::new(queue);
let inner = Inner {
node_config,
queue,
storage,
chain_header: Arc::new(RwLock::new(chain_header)),
sequence_number_cache: NonceCache::new(128),
};
Self { inner }
}
#[cfg(test)]
pub fn get_store(&self) -> Arc<dyn Store> {
self.inner.storage.clone()
}
pub(crate) fn from_inner(inner: Inner) -> TxPoolService {
Self { inner }
}
pub(crate) fn get_inner(&self) -> Inner {
self.inner.clone()
}
}
impl TxPoolSyncService for TxPoolService {
fn add_txns(
&self,
txns: Vec<SignedUserTransaction>,
) -> Vec<Result<(), transaction::TransactionError>> {
// _timer will observe_duration when it's dropped.
// We don't need to call it explicitly.
let _timer = TXPOOL_SERVICE_HISTOGRAM
.with_label_values(&["add_txns"])
.start_timer();
self.inner.import_txns(txns)
}
fn remove_txn(&self, txn_hash: HashValue, is_invalid: bool) -> Option<SignedUserTransaction> {
let _timer = TXPOOL_SERVICE_HISTOGRAM
.with_label_values(&["remove_txn"])
.start_timer();
self.inner
.remove_txn(txn_hash, is_invalid)
.map(|t| t.signed().clone())
}
/// Get all pending txns which is ok to be packaged to mining.
fn get_pending_txns(
&self,
max_len: Option<u64>,
current_timestamp_secs: Option<u64>,
) -> Vec<SignedUserTransaction> {
let _timer = TXPOOL_SERVICE_HISTOGRAM
.with_label_values(&["get_pending_txns"])
.start_timer();
let current_timestamp_secs = current_timestamp_secs
.unwrap_or_else(|| self.inner.node_config.net().time_service().now_secs());
let r = self
.inner
.get_pending(max_len.unwrap_or(u64::MAX), current_timestamp_secs);
r.into_iter().map(|t| t.signed().clone()).collect()
}
/// Returns next valid sequence number for given sender
/// or `None` if there are no pending transactions from that sender.
fn next_sequence_number(&self, address: AccountAddress) -> Option<u64> {
let _timer = TXPOOL_SERVICE_HISTOGRAM
.with_label_values(&["next_sequence_number"])
.start_timer();
self.inner.next_sequence_number(address)
}
/// subscribe
fn subscribe_txns(
&self,
) -> mpsc::UnboundedReceiver<Arc<Vec<(HashValue, transaction::TxStatus)>>> {
let _timer = TXPOOL_SERVICE_HISTOGRAM
.with_label_values(&["subscribe_txns"])
.start_timer();
self.inner.subscribe_txns()
}
fn subscribe_pending_txn(&self) -> mpsc::UnboundedReceiver<Arc<Vec<HashValue>>> {
let _timer = TXPOOL_SERVICE_HISTOGRAM
.with_label_values(&["subscribe_pending_txns"])
.start_timer();
self.inner.subscribe_pending_txns()
}
/// rollback
fn chain_new_block(&self, enacted: Vec<Block>, retracted: Vec<Block>) -> Result<()> {
let _timer = TXPOOL_SERVICE_HISTOGRAM
.with_label_values(&["rollback"])
.start_timer();
self.inner.chain_new_block(enacted, retracted)
}
fn status(&self) -> TxPoolStatus {
self.inner.queue.status().into()
}
fn find_txn(&self, hash: &HashValue) -> Option<SignedUserTransaction> {
self.inner
.queue
.find(hash)
.map(move |txn| txn.signed().clone())
}
fn txns_of_sender(
&self,
sender: &AccountAddress,
max_len: Option<usize>,
) -> Vec<SignedUserTransaction> {
self.inner
.queue
.txns_of_sender(sender, max_len.unwrap_or(usize::max_value()))
.into_iter()
.map(|t| t.signed().clone())
.collect()
}
}
pub(crate) type TxnQueue = TransactionQueue;
#[derive(Clone)]
pub(crate) struct Inner {
node_config: Arc<NodeConfig>,
queue: Arc<TxnQueue>,
chain_header: Arc<RwLock<BlockHeader>>,
storage: Arc<dyn Store>,
sequence_number_cache: NonceCache,
}
impl std::fmt::Debug for Inner {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"queue: {:?}, chain header: {:?}",
&self.queue, &self.chain_header,
)
}
}
impl Inner {
pub(crate) fn queue(&self) -> Arc<TxnQueue> {
self.queue.clone()
}
pub(crate) fn pool_status(&self) -> Status {
self.queue.status()
}
pub(crate) fn notify_new_chain_header(&self, header: BlockHeader) {
*self.chain_header.write() = header;
self.sequence_number_cache.clear();
}
pub(crate) fn get_chain_header(&self) -> BlockHeader {
self.chain_header.read().clone()
}
pub(crate) fn cull(&self) {
        // NOTICE: because the new-head-block event is separate from the
        // chain_new_block event, we need to remove invalid txns here.
        // It would be better if the caller combined the two events;
        // then we wouldn't need to re-import invalid txns on chain_new_block.
let now_seconds = self.chain_header.read().timestamp / 1000;
self.queue.cull(self.get_pool_client(), now_seconds)
}
pub(crate) fn import_txns(
&self,
txns: Vec<transaction::SignedUserTransaction>,
) -> Vec<Result<(), transaction::TransactionError>> {
let txns = txns
.into_iter()
.map(|t| PoolTransaction::Unverified(UnverifiedUserTransaction::from(t)));
self.queue.import(self.get_pool_client(), txns)
}
pub(crate) fn remove_txn(
&self,
txn_hash: HashValue,
is_invalid: bool,
) -> Option<Arc<pool::VerifiedTransaction>> {
let mut removed = self.queue.remove(vec![&txn_hash], is_invalid);
removed
.pop()
.expect("remove should return one result per hash")
}
pub(crate) fn get_pending(
&self,
max_len: u64,
current_timestamp_secs: u64,
) -> Vec<Arc<VerifiedTransaction>> {
let pending_settings = PendingSettings {
block_number: u64::max_value(),
current_timestamp: current_timestamp_secs,
max_len: max_len as usize,
ordering: PendingOrdering::Priority,
};
self.queue.pending(self.get_pool_client(), pending_settings)
}
pub(crate) fn next_sequence_number(&self, address: AccountAddress) -> Option<u64> {
self.queue
.next_sequence_number(self.get_pool_client(), &address)
}
pub(crate) fn subscribe_txns(
&self,
) -> mpsc::UnboundedReceiver<Arc<Vec<(HashValue, TxStatus)>>> {
let (tx, rx) = mpsc::unbounded();
self.queue.add_full_listener(tx);
rx
}
pub(crate) fn subscribe_pending_txns(&self) -> mpsc::UnboundedReceiver<Arc<Vec<HashValue>>> {
let (tx, rx) = mpsc::unbounded();
self.queue.add_pending_listener(tx);
rx
}
pub(crate) fn chain_new_block(&self, enacted: Vec<Block>, retracted: Vec<Block>) -> Result<()> {
debug!(
"receive chain_new_block msg, enacted: {:?}, retracted: {:?}",
enacted
.iter()
.map(|b| b.header().number())
.collect::<Vec<_>>(),
retracted
.iter()
.map(|b| b.header().number())
.collect::<Vec<_>>()
);
// new head block, update chain header
if let Some(block) = enacted.last() {
self.notify_new_chain_header(block.header().clone());
}
// remove outdated txns.
self.cull();
// import retracted txns.
let txns = retracted
.into_iter()
.flat_map(|b| {
let txns: Vec<SignedUserTransaction> = b.into_inner().1.into();
txns.into_iter()
})
.map(|t| PoolTransaction::Retracted(UnverifiedUserTransaction::from(t)));
let _ = self.queue.import(self.get_pool_client(), txns);
Ok(())
}
fn get_pool_client(&self) -> PoolClient {
PoolClient::new(
self.chain_header.read().clone(),
self.storage.clone(),
self.sequence_number_cache.clone(),
)
}
}

static WORDS: &[&str] = &[
"aback",
"abashed",
"abject",
"abortive",
"abrupt",
"abundant",
"acceptable",
"accidental",
"accurate",
"acid",
"acoustic",
"ad",
"agonizing",
"ancient",
"angry",
"animated",
"arrogant",
"average",
"befitting",
"better",
"big",
"billowy",
"boundless",
"bright",
"broad",
"burly",
"callous",
"capable",
"careful",
"careless",
"charming",
"chemical",
"chivalrous",
"classy",
"clever",
"cloistered",
"cluttered",
"conscious",
"crazy",
"crooked",
"crowded",
"cuddly",
"cultured",
"cut",
"damaged",
"deadpan",
"debonair",
"deeply",
"defective",
"defiant",
"different",
"diligent",
"dirty",
"disgusting",
"drab",
"dreary",
"drunk",
"dusty",
"earthy",
"easy",
"economic",
"efficient",
"enchanted",
"endurable",
"entertaining",
"enthusiastic",
"erect",
"evasive",
"exotic",
"extra-large",
"exuberant",
"fabulous",
"faint",
"false",
"fancy",
"fascinated",
"fat",
"fearful",
"feigned",
"few",
"first",
"five",
"foolish",
"fragile",
"freezing",
"friendly",
"funny",
"fuzzy",
"gaping",
"gaudy",
"giddy",
"glorious",
"glossy",
"great",
"green",
"groovy",
"grubby",
"grumpy",
"guiltless",
"hallowed",
"handsomely",
"hapless",
"helpless",
"high-pitched",
"hilarious",
"hoc",
"holistic",
"honorable",
"huge",
"humdrum",
"hurried",
"ill-fated",
"ill-informed",
"impolite",
"important",
"inquisitive",
"instinctive",
"internal",
"invincible",
"irate",
"jaded",
"knowledgeable",
"languid",
"last",
"late",
"laughable",
"lazy",
"limping",
"literate",
"living",
"long-term",
"loose",
"loud",
"lovely",
"loving",
"luxuriant",
"macabre",
"madly",
"maniacal",
"many",
"meaty",
"melted",
"messy",
"mindless",
"minor",
"momentous",
"mountainous",
"nappy",
"natural",
"nebulous",
"needless",
"nice",
"noxious",
"nutritious",
"obsequious",
"obsolete",
"obtainable",
"open",
"opposite",
"overjoyed",
"overrated",
"painful",
"parsimonious",
"periodic",
"picayune",
"piquant",
"plastic",
"plucky",
"polite",
"premium",
"present",
"puffy",
"pumped",
"quizzical",
"rare",
"red",
"regular",
"reminiscent",
"right",
"rough",
"rural",
"satisfying",
"screeching",
"serious",
"sharp",
"shiny",
"shocking",
"shy",
"silent",
"skillful",
"slim",
"slippery",
"soggy",
"sore",
"spiky",
"splendid",
"spotless",
"spotted",
"staking",
"stale",
"steady",
"stormy",
"sturdy",
"successful",
"succinct",
"sulky",
"super",
"swanky",
"tame",
"teeny",
"teeny-tiny",
"telling",
"terrific",
"thirsty",
"tough",
"trashy",
"tremendous",
"ultra",
"unable",
"unequal",
"unhealthy",
"unsuitable",
"utter",
"uttermost",
"vacuous",
"vagabond",
"vengeful",
"venomous",
"versed",
"volatile",
"wanting",
"warlike",
"warm",
"weak",
"wealthy",
"wiggly",
"windy",
"woozy",
"worthless",
"wrathful",
"wretched",
"wry",
"zany",
];

use crate::prelude::*;
use crate::utils::NoNull;
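// `take_every(n)` keeps the elements at indices 0, n, 2n, …; each impl below
// takes the faster no-null iterator when the array contains no nulls.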
impl<T> ChunkTakeEvery<T> for ChunkedArray<T>
where
T: PolarsNumericType,
{
fn take_every(&self, n: usize) -> ChunkedArray<T> {
if self.null_count() == 0 {
let a: NoNull<_> = self.into_no_null_iter().step_by(n).collect();
a.into_inner()
} else {
self.into_iter().step_by(n).collect()
}
}
}
impl ChunkTakeEvery<BooleanType> for BooleanChunked {
fn take_every(&self, n: usize) -> BooleanChunked {
if self.null_count() == 0 {
self.into_no_null_iter().step_by(n).collect()
} else {
self.into_iter().step_by(n).collect()
}
}
}
impl ChunkTakeEvery<Utf8Type> for Utf8Chunked {
fn take_every(&self, n: usize) -> Utf8Chunked {
if self.null_count() == 0 {
self.into_no_null_iter().step_by(n).collect()
} else {
self.into_iter().step_by(n).collect()
}
}
}
impl ChunkTakeEvery<ListType> for ListChunked {
fn take_every(&self, n: usize) -> ListChunked {
if self.null_count() == 0 {
self.into_no_null_iter().step_by(n).collect()
} else {
self.into_iter().step_by(n).collect()
}
}
}
impl ChunkTakeEvery<CategoricalType> for CategoricalChunked {
fn take_every(&self, n: usize) -> CategoricalChunked {
let mut ca = if self.null_count() == 0 {
let ca: NoNull<UInt32Chunked> = self.into_no_null_iter().step_by(n).collect();
ca.into_inner()
} else {
self.into_iter().step_by(n).collect()
};
ca.categorical_map = self.categorical_map.clone();
ca.cast().unwrap()
}
}
#[cfg(feature = "object")]
impl<T> ChunkTakeEvery<ObjectType<T>> for ObjectChunked<T> {
fn take_every(&self, _n: usize) -> ObjectChunked<T> {
todo!()
}
}

// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
use std::fmt::Write;
/// See [`CancelRotateSecretInput`](crate::input::CancelRotateSecretInput)
pub mod cancel_rotate_secret_input {
/// A builder for [`CancelRotateSecretInput`](crate::input::CancelRotateSecretInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// Consumes the builder and constructs a [`CancelRotateSecretInput`](crate::input::CancelRotateSecretInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::CancelRotateSecretInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::CancelRotateSecretInput {
secret_id: self.secret_id,
})
}
}
}
#[doc(hidden)]
pub type CancelRotateSecretInputOperationOutputAlias = crate::operation::CancelRotateSecret;
#[doc(hidden)]
pub type CancelRotateSecretInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl CancelRotateSecretInput {
/// Consumes the builder and constructs an Operation<[`CancelRotateSecret`](crate::operation::CancelRotateSecret)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::CancelRotateSecret,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::CancelRotateSecretInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::CancelRotateSecretInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.CancelRotateSecret",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_cancel_rotate_secret(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::CancelRotateSecret::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"CancelRotateSecret",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`CancelRotateSecretInput`](crate::input::CancelRotateSecretInput)
pub fn builder() -> crate::input::cancel_rotate_secret_input::Builder {
crate::input::cancel_rotate_secret_input::Builder::default()
}
}
/// See [`CreateSecretInput`](crate::input::CreateSecretInput)
pub mod create_secret_input {
/// A builder for [`CreateSecretInput`](crate::input::CreateSecretInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) client_request_token: std::option::Option<std::string::String>,
pub(crate) description: std::option::Option<std::string::String>,
pub(crate) kms_key_id: std::option::Option<std::string::String>,
pub(crate) secret_binary: std::option::Option<aws_smithy_types::Blob>,
pub(crate) secret_string: std::option::Option<std::string::String>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
pub(crate) add_replica_regions:
std::option::Option<std::vec::Vec<crate::model::ReplicaRegionType>>,
pub(crate) force_overwrite_replica_secret: std::option::Option<bool>,
}
impl Builder {
/// <p>The name of the new secret.</p>
/// <p>The secret name can contain ASCII letters, numbers, and the following characters: /_+=.@-</p>
/// <p>Do not end your secret name with a hyphen followed by six characters. If you do so, you risk confusion and unexpected results when searching for a secret by partial ARN. Secrets Manager automatically adds a hyphen and six random characters after the secret name at the end of the ARN.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
/// <p>The name of the new secret.</p>
/// <p>The secret name can contain ASCII letters, numbers, and the following characters: /_+=.@-</p>
/// <p>Do not end your secret name with a hyphen followed by six characters. If you do so, you risk confusion and unexpected results when searching for a secret by partial ARN. Secrets Manager automatically adds a hyphen and six random characters after the secret name at the end of the ARN.</p>
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// <p>If you include <code>SecretString</code> or <code>SecretBinary</code>, then Secrets Manager creates an initial version for the secret, and this parameter specifies the unique identifier for the new version. </p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for the new version and include the value in the request.</p>
/// </note>
/// <p>This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness of your versions within the specified secret. </p>
/// <ul>
/// <li> <p>If the <code>ClientRequestToken</code> value isn't already associated with a version of the secret then a new version of the secret is created. </p> </li>
/// <li> <p>If a version with this value already exists and the version <code>SecretString</code> and <code>SecretBinary</code> values are the same as those in the request, then the request is ignored.</p> </li>
/// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> and <code>SecretBinary</code> values are different from those in the request, then the request fails because you cannot modify an existing version. Instead, use <code>PutSecretValue</code> to create a new version.</p> </li>
/// </ul>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub fn client_request_token(mut self, input: impl Into<std::string::String>) -> Self {
self.client_request_token = Some(input.into());
self
}
/// <p>If you include <code>SecretString</code> or <code>SecretBinary</code>, then Secrets Manager creates an initial version for the secret, and this parameter specifies the unique identifier for the new version. </p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for the new version and include the value in the request.</p>
/// </note>
/// <p>This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness of your versions within the specified secret. </p>
/// <ul>
/// <li> <p>If the <code>ClientRequestToken</code> value isn't already associated with a version of the secret then a new version of the secret is created. </p> </li>
/// <li> <p>If a version with this value already exists and the version <code>SecretString</code> and <code>SecretBinary</code> values are the same as those in the request, then the request is ignored.</p> </li>
/// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> and <code>SecretBinary</code> values are different from those in the request, then the request fails because you cannot modify an existing version. Instead, use <code>PutSecretValue</code> to create a new version.</p> </li>
/// </ul>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub fn set_client_request_token(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.client_request_token = input;
self
}
/// <p>The description of the secret.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
/// <p>The description of the secret.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
/// <p>The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt the secret value in the secret.</p>
/// <p>To use a KMS key in a different account, use the key ARN or the alias ARN.</p>
/// <p>If you don't specify this value, then Secrets Manager uses the key <code>aws/secretsmanager</code>. If that key doesn't yet exist, then Secrets Manager creates it for you automatically the first time it encrypts the secret value.</p>
/// <p>If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use <code>aws/secretsmanager</code> to encrypt the secret, and you must create and use a customer managed KMS key. </p>
pub fn kms_key_id(mut self, input: impl Into<std::string::String>) -> Self {
self.kms_key_id = Some(input.into());
self
}
/// <p>The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt the secret value in the secret.</p>
/// <p>To use a KMS key in a different account, use the key ARN or the alias ARN.</p>
/// <p>If you don't specify this value, then Secrets Manager uses the key <code>aws/secretsmanager</code>. If that key doesn't yet exist, then Secrets Manager creates it for you automatically the first time it encrypts the secret value.</p>
/// <p>If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use <code>aws/secretsmanager</code> to encrypt the secret, and you must create and use a customer managed KMS key. </p>
pub fn set_kms_key_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.kms_key_id = input;
self
}
/// <p>The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.</p>
/// <p>Either <code>SecretString</code> or <code>SecretBinary</code> must have a value, but not both.</p>
/// <p>This parameter is not available in the Secrets Manager console.</p>
pub fn secret_binary(mut self, input: aws_smithy_types::Blob) -> Self {
self.secret_binary = Some(input);
self
}
/// <p>The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.</p>
/// <p>Either <code>SecretString</code> or <code>SecretBinary</code> must have a value, but not both.</p>
/// <p>This parameter is not available in the Secrets Manager console.</p>
pub fn set_secret_binary(
mut self,
input: std::option::Option<aws_smithy_types::Blob>,
) -> Self {
self.secret_binary = input;
self
}
/// <p>The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value.</p>
/// <p>Either <code>SecretString</code> or <code>SecretBinary</code> must have a value, but not both.</p>
/// <p>If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the <code>SecretString</code> parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that a Lambda rotation function can parse.</p>
pub fn secret_string(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_string = Some(input.into());
self
}
/// <p>The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value.</p>
/// <p>Either <code>SecretString</code> or <code>SecretBinary</code> must have a value, but not both.</p>
/// <p>If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the <code>SecretString</code> parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that a Lambda rotation function can parse.</p>
pub fn set_secret_string(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.secret_string = input;
self
}
/// Appends an item to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>A list of tags to attach to the secret. Each tag is a key and value pair of strings in a JSON text string, for example:</p>
/// <p> <code>[{"Key":"CostCenter","Value":"12345"},{"Key":"environment","Value":"production"}]</code> </p>
/// <p>Secrets Manager tag key names are case sensitive. A tag with the key "ABC" is a different tag from one with key "abc".</p>
/// <p>If you check tags in permissions policies as part of your security strategy, then adding or removing a tag can change permissions. If the completion of this operation would result in you losing your permissions for this secret, then Secrets Manager blocks the operation and returns an <code>Access Denied</code> error. For more information, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#tag-secrets-abac">Control access to secrets using tags</a> and <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#auth-and-access_tags2">Limit access to identities with tags that match secrets' tags</a>.</p>
/// <p>For information about how to format a JSON parameter for the various command line tool environments, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json">Using JSON for Parameters</a>. If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.</p>
/// <p>The following restrictions apply to tags:</p>
/// <ul>
/// <li> <p>Maximum number of tags per secret: 50</p> </li>
/// <li> <p>Maximum key length: 127 Unicode characters in UTF-8</p> </li>
/// <li> <p>Maximum value length: 255 Unicode characters in UTF-8</p> </li>
/// <li> <p>Tag keys and values are case sensitive.</p> </li>
/// <li> <p>Do not use the <code>aws:</code> prefix in your tag names or values because Amazon Web Services reserves it for Amazon Web Services use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.</p> </li>
/// <li> <p>If you use your tagging schema across multiple services and resources, other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.</p> </li>
/// </ul>
pub fn tags(mut self, input: crate::model::Tag) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input);
self.tags = Some(v);
self
}
/// <p>A list of tags to attach to the secret. Each tag is a key and value pair of strings in a JSON text string, for example:</p>
/// <p> <code>[{"Key":"CostCenter","Value":"12345"},{"Key":"environment","Value":"production"}]</code> </p>
/// <p>Secrets Manager tag key names are case sensitive. A tag with the key "ABC" is a different tag from one with key "abc".</p>
/// <p>If you check tags in permissions policies as part of your security strategy, then adding or removing a tag can change permissions. If the completion of this operation would result in you losing your permissions for this secret, then Secrets Manager blocks the operation and returns an <code>Access Denied</code> error. For more information, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#tag-secrets-abac">Control access to secrets using tags</a> and <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#auth-and-access_tags2">Limit access to identities with tags that match secrets' tags</a>.</p>
/// <p>For information about how to format a JSON parameter for the various command line tool environments, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json">Using JSON for Parameters</a>. If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.</p>
/// <p>The following restrictions apply to tags:</p>
/// <ul>
/// <li> <p>Maximum number of tags per secret: 50</p> </li>
/// <li> <p>Maximum key length: 127 Unicode characters in UTF-8</p> </li>
/// <li> <p>Maximum value length: 255 Unicode characters in UTF-8</p> </li>
/// <li> <p>Tag keys and values are case sensitive.</p> </li>
/// <li> <p>Do not use the <code>aws:</code> prefix in your tag names or values because Amazon Web Services reserves it for Amazon Web Services use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.</p> </li>
/// <li> <p>If you use your tagging schema across multiple services and resources, other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.</p> </li>
/// </ul>
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// Appends an item to `add_replica_regions`.
///
/// To override the contents of this collection use [`set_add_replica_regions`](Self::set_add_replica_regions).
///
/// <p>A list of Regions and KMS keys to replicate secrets.</p>
pub fn add_replica_regions(mut self, input: crate::model::ReplicaRegionType) -> Self {
let mut v = self.add_replica_regions.unwrap_or_default();
v.push(input);
self.add_replica_regions = Some(v);
self
}
/// <p>A list of Regions and KMS keys to replicate secrets.</p>
pub fn set_add_replica_regions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ReplicaRegionType>>,
) -> Self {
self.add_replica_regions = input;
self
}
/// <p>Specifies whether to overwrite a secret with the same name in the destination Region.</p>
pub fn force_overwrite_replica_secret(mut self, input: bool) -> Self {
self.force_overwrite_replica_secret = Some(input);
self
}
/// <p>Specifies whether to overwrite a secret with the same name in the destination Region.</p>
pub fn set_force_overwrite_replica_secret(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.force_overwrite_replica_secret = input;
self
}
/// Consumes the builder and constructs a [`CreateSecretInput`](crate::input::CreateSecretInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::CreateSecretInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::CreateSecretInput {
name: self.name,
client_request_token: self.client_request_token,
description: self.description,
kms_key_id: self.kms_key_id,
secret_binary: self.secret_binary,
secret_string: self.secret_string,
tags: self.tags,
add_replica_regions: self.add_replica_regions,
force_overwrite_replica_secret: self
.force_overwrite_replica_secret
.unwrap_or_default(),
})
}
}
}
#[doc(hidden)]
pub type CreateSecretInputOperationOutputAlias = crate::operation::CreateSecret;
#[doc(hidden)]
pub type CreateSecretInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl CreateSecretInput {
/// Consumes the builder and constructs an Operation<[`CreateSecret`](crate::operation::CreateSecret)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
mut self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::CreateSecret,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
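        // If the caller did not supply a ClientRequestToken, generate an
        // idempotency token so that retries of this request do not create
        // duplicate secret versions.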
if self.client_request_token.is_none() {
self.client_request_token = Some(_config.make_token.make_idempotency_token());
}
let mut request = {
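            // Secrets Manager uses the AWS JSON 1.1 protocol: every operation is a
            // POST to "/", and the action is selected by the `x-amz-target` header
            // set below.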
fn uri_base(
_input: &crate::input::CreateSecretInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::CreateSecretInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.CreateSecret",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_create_secret(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::CreateSecret::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"CreateSecret",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`CreateSecretInput`](crate::input::CreateSecretInput)
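    ///
    /// A minimal, hand-written usage sketch; it assumes this crate is consumed
    /// under the name `aws_sdk_secretsmanager`, and the secret name and payload
    /// below are placeholders.
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::CreateSecretInput::builder()
    ///     // Placeholder name and secret value; substitute your own.
    ///     .name("MyAppCredentials")
    ///     .secret_string(r#"{"username":"example","password":"example"}"#)
    ///     .build()
    ///     .expect("valid CreateSecretInput");
    /// ```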
pub fn builder() -> crate::input::create_secret_input::Builder {
crate::input::create_secret_input::Builder::default()
}
}
/// See [`DeleteResourcePolicyInput`](crate::input::DeleteResourcePolicyInput)
pub mod delete_resource_policy_input {
/// A builder for [`DeleteResourcePolicyInput`](crate::input::DeleteResourcePolicyInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN or name of the secret to delete the attached resource-based policy for.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret to delete the attached resource-based policy for.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// Consumes the builder and constructs a [`DeleteResourcePolicyInput`](crate::input::DeleteResourcePolicyInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DeleteResourcePolicyInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::DeleteResourcePolicyInput {
secret_id: self.secret_id,
})
}
}
}
#[doc(hidden)]
pub type DeleteResourcePolicyInputOperationOutputAlias = crate::operation::DeleteResourcePolicy;
#[doc(hidden)]
pub type DeleteResourcePolicyInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl DeleteResourcePolicyInput {
/// Consumes the builder and constructs an Operation<[`DeleteResourcePolicy`](crate::operation::DeleteResourcePolicy)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::DeleteResourcePolicy,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::DeleteResourcePolicyInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::DeleteResourcePolicyInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.DeleteResourcePolicy",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_delete_resource_policy(
&self,
)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::DeleteResourcePolicy::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"DeleteResourcePolicy",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`DeleteResourcePolicyInput`](crate::input::DeleteResourcePolicyInput)
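    ///
    /// A minimal usage sketch (the crate name `aws_sdk_secretsmanager` and the
    /// secret name are assumptions):
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::DeleteResourcePolicyInput::builder()
    ///     .secret_id("MyAppCredentials") // placeholder ARN or name
    ///     .build()
    ///     .expect("valid DeleteResourcePolicyInput");
    /// ```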
pub fn builder() -> crate::input::delete_resource_policy_input::Builder {
crate::input::delete_resource_policy_input::Builder::default()
}
}
/// See [`DeleteSecretInput`](crate::input::DeleteSecretInput)
pub mod delete_secret_input {
/// A builder for [`DeleteSecretInput`](crate::input::DeleteSecretInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) recovery_window_in_days: std::option::Option<i64>,
pub(crate) force_delete_without_recovery: std::option::Option<bool>,
}
impl Builder {
/// <p>The ARN or name of the secret to delete.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret to delete.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
        /// <p>The number of days from 7 to 30 that Secrets Manager waits before permanently deleting the secret. You can't use both this parameter and <code>ForceDeleteWithoutRecovery</code> in the same call. If you don't use either, then Secrets Manager defaults to a 30-day recovery window.</p>
pub fn recovery_window_in_days(mut self, input: i64) -> Self {
self.recovery_window_in_days = Some(input);
self
}
        /// <p>The number of days from 7 to 30 that Secrets Manager waits before permanently deleting the secret. You can't use both this parameter and <code>ForceDeleteWithoutRecovery</code> in the same call. If you don't use either, then Secrets Manager defaults to a 30-day recovery window.</p>
pub fn set_recovery_window_in_days(mut self, input: std::option::Option<i64>) -> Self {
self.recovery_window_in_days = input;
self
}
        /// <p>Specifies whether to delete the secret without any recovery window. You can't use both this parameter and <code>RecoveryWindowInDays</code> in the same call. If you don't use either, then Secrets Manager defaults to a 30-day recovery window.</p>
        /// <p>Secrets Manager performs the actual deletion with an asynchronous background process, so there might be a short delay before the secret is permanently deleted. If you delete a secret and then immediately create a secret with the same name, use appropriate backoff and retry logic.</p> <important>
        /// <p>Use this parameter with caution. It causes the operation to skip the recovery window that Secrets Manager would otherwise impose with the <code>RecoveryWindowInDays</code> parameter. If you delete a secret with the <code>ForceDeleteWithoutRecovery</code> parameter, then you have no opportunity to recover the secret. You lose the secret permanently.</p>
        /// </important>
pub fn force_delete_without_recovery(mut self, input: bool) -> Self {
self.force_delete_without_recovery = Some(input);
self
}
        /// <p>Specifies whether to delete the secret without any recovery window. You can't use both this parameter and <code>RecoveryWindowInDays</code> in the same call. If you don't use either, then Secrets Manager defaults to a 30-day recovery window.</p>
        /// <p>Secrets Manager performs the actual deletion with an asynchronous background process, so there might be a short delay before the secret is permanently deleted. If you delete a secret and then immediately create a secret with the same name, use appropriate backoff and retry logic.</p> <important>
        /// <p>Use this parameter with caution. It causes the operation to skip the recovery window that Secrets Manager would otherwise impose with the <code>RecoveryWindowInDays</code> parameter. If you delete a secret with the <code>ForceDeleteWithoutRecovery</code> parameter, then you have no opportunity to recover the secret. You lose the secret permanently.</p>
        /// </important>
pub fn set_force_delete_without_recovery(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.force_delete_without_recovery = input;
self
}
/// Consumes the builder and constructs a [`DeleteSecretInput`](crate::input::DeleteSecretInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DeleteSecretInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::DeleteSecretInput {
secret_id: self.secret_id,
recovery_window_in_days: self.recovery_window_in_days,
force_delete_without_recovery: self.force_delete_without_recovery,
})
}
}
}
#[doc(hidden)]
pub type DeleteSecretInputOperationOutputAlias = crate::operation::DeleteSecret;
#[doc(hidden)]
pub type DeleteSecretInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl DeleteSecretInput {
/// Consumes the builder and constructs an Operation<[`DeleteSecret`](crate::operation::DeleteSecret)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::DeleteSecret,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::DeleteSecretInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::DeleteSecretInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.DeleteSecret",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_delete_secret(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::DeleteSecret::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"DeleteSecret",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`DeleteSecretInput`](crate::input::DeleteSecretInput)
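    ///
    /// A minimal usage sketch (the crate name `aws_sdk_secretsmanager` and the
    /// secret name are assumptions):
    ///
    /// ```no_run
    /// // Schedule deletion with a 7-day recovery window instead of the 30-day default.
    /// let input = aws_sdk_secretsmanager::input::DeleteSecretInput::builder()
    ///     .secret_id("MyAppCredentials") // placeholder ARN or name
    ///     .recovery_window_in_days(7)
    ///     .build()
    ///     .expect("valid DeleteSecretInput");
    /// ```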
pub fn builder() -> crate::input::delete_secret_input::Builder {
crate::input::delete_secret_input::Builder::default()
}
}
/// See [`DescribeSecretInput`](crate::input::DescribeSecretInput)
pub mod describe_secret_input {
/// A builder for [`DescribeSecretInput`](crate::input::DescribeSecretInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN or name of the secret. </p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret. </p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// Consumes the builder and constructs a [`DescribeSecretInput`](crate::input::DescribeSecretInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DescribeSecretInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::DescribeSecretInput {
secret_id: self.secret_id,
})
}
}
}
#[doc(hidden)]
pub type DescribeSecretInputOperationOutputAlias = crate::operation::DescribeSecret;
#[doc(hidden)]
pub type DescribeSecretInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl DescribeSecretInput {
/// Consumes the builder and constructs an Operation<[`DescribeSecret`](crate::operation::DescribeSecret)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::DescribeSecret,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::DescribeSecretInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::DescribeSecretInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.DescribeSecret",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_describe_secret(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::DescribeSecret::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"DescribeSecret",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`DescribeSecretInput`](crate::input::DescribeSecretInput)
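    ///
    /// A minimal usage sketch (the crate name `aws_sdk_secretsmanager` and the
    /// secret name are assumptions):
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::DescribeSecretInput::builder()
    ///     .secret_id("MyAppCredentials") // placeholder ARN or name
    ///     .build()
    ///     .expect("valid DescribeSecretInput");
    /// ```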
pub fn builder() -> crate::input::describe_secret_input::Builder {
crate::input::describe_secret_input::Builder::default()
}
}
/// See [`GetRandomPasswordInput`](crate::input::GetRandomPasswordInput)
pub mod get_random_password_input {
/// A builder for [`GetRandomPasswordInput`](crate::input::GetRandomPasswordInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) password_length: std::option::Option<i64>,
pub(crate) exclude_characters: std::option::Option<std::string::String>,
pub(crate) exclude_numbers: std::option::Option<bool>,
pub(crate) exclude_punctuation: std::option::Option<bool>,
pub(crate) exclude_uppercase: std::option::Option<bool>,
pub(crate) exclude_lowercase: std::option::Option<bool>,
pub(crate) include_space: std::option::Option<bool>,
pub(crate) require_each_included_type: std::option::Option<bool>,
}
impl Builder {
/// <p>The length of the password. If you don't include this parameter, the default length is 32 characters.</p>
pub fn password_length(mut self, input: i64) -> Self {
self.password_length = Some(input);
self
}
/// <p>The length of the password. If you don't include this parameter, the default length is 32 characters.</p>
pub fn set_password_length(mut self, input: std::option::Option<i64>) -> Self {
self.password_length = input;
self
}
/// <p>A string of the characters that you don't want in the password.</p>
pub fn exclude_characters(mut self, input: impl Into<std::string::String>) -> Self {
self.exclude_characters = Some(input.into());
self
}
/// <p>A string of the characters that you don't want in the password.</p>
pub fn set_exclude_characters(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.exclude_characters = input;
self
}
/// <p>Specifies whether to exclude numbers from the password. If you don't include this switch, the password can contain numbers.</p>
pub fn exclude_numbers(mut self, input: bool) -> Self {
self.exclude_numbers = Some(input);
self
}
/// <p>Specifies whether to exclude numbers from the password. If you don't include this switch, the password can contain numbers.</p>
pub fn set_exclude_numbers(mut self, input: std::option::Option<bool>) -> Self {
self.exclude_numbers = input;
self
}
/// <p>Specifies whether to exclude the following punctuation characters from the password: <code>! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~</code>. If you don't include this switch, the password can contain punctuation.</p>
pub fn exclude_punctuation(mut self, input: bool) -> Self {
self.exclude_punctuation = Some(input);
self
}
/// <p>Specifies whether to exclude the following punctuation characters from the password: <code>! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~</code>. If you don't include this switch, the password can contain punctuation.</p>
pub fn set_exclude_punctuation(mut self, input: std::option::Option<bool>) -> Self {
self.exclude_punctuation = input;
self
}
/// <p>Specifies whether to exclude uppercase letters from the password. If you don't include this switch, the password can contain uppercase letters.</p>
pub fn exclude_uppercase(mut self, input: bool) -> Self {
self.exclude_uppercase = Some(input);
self
}
/// <p>Specifies whether to exclude uppercase letters from the password. If you don't include this switch, the password can contain uppercase letters.</p>
pub fn set_exclude_uppercase(mut self, input: std::option::Option<bool>) -> Self {
self.exclude_uppercase = input;
self
}
/// <p>Specifies whether to exclude lowercase letters from the password. If you don't include this switch, the password can contain lowercase letters.</p>
pub fn exclude_lowercase(mut self, input: bool) -> Self {
self.exclude_lowercase = Some(input);
self
}
/// <p>Specifies whether to exclude lowercase letters from the password. If you don't include this switch, the password can contain lowercase letters.</p>
pub fn set_exclude_lowercase(mut self, input: std::option::Option<bool>) -> Self {
self.exclude_lowercase = input;
self
}
/// <p>Specifies whether to include the space character. If you include this switch, the password can contain space characters.</p>
pub fn include_space(mut self, input: bool) -> Self {
self.include_space = Some(input);
self
}
/// <p>Specifies whether to include the space character. If you include this switch, the password can contain space characters.</p>
pub fn set_include_space(mut self, input: std::option::Option<bool>) -> Self {
self.include_space = input;
self
}
        /// <p>Specifies whether to include at least one uppercase letter, one lowercase letter, one number, and one punctuation character in the password. If you don't include this switch, the password contains at least one of every character type.</p>
pub fn require_each_included_type(mut self, input: bool) -> Self {
self.require_each_included_type = Some(input);
self
}
        /// <p>Specifies whether to include at least one uppercase letter, one lowercase letter, one number, and one punctuation character in the password. If you don't include this switch, the password contains at least one of every character type.</p>
pub fn set_require_each_included_type(mut self, input: std::option::Option<bool>) -> Self {
self.require_each_included_type = input;
self
}
/// Consumes the builder and constructs a [`GetRandomPasswordInput`](crate::input::GetRandomPasswordInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetRandomPasswordInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetRandomPasswordInput {
password_length: self.password_length,
exclude_characters: self.exclude_characters,
exclude_numbers: self.exclude_numbers,
exclude_punctuation: self.exclude_punctuation,
exclude_uppercase: self.exclude_uppercase,
exclude_lowercase: self.exclude_lowercase,
include_space: self.include_space,
require_each_included_type: self.require_each_included_type,
})
}
}
}
#[doc(hidden)]
pub type GetRandomPasswordInputOperationOutputAlias = crate::operation::GetRandomPassword;
#[doc(hidden)]
pub type GetRandomPasswordInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl GetRandomPasswordInput {
/// Consumes the builder and constructs an Operation<[`GetRandomPassword`](crate::operation::GetRandomPassword)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetRandomPassword,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::GetRandomPasswordInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetRandomPasswordInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.GetRandomPassword",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_get_random_password(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetRandomPassword::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetRandomPassword",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`GetRandomPasswordInput`](crate::input::GetRandomPasswordInput)
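    ///
    /// A minimal usage sketch (the crate name `aws_sdk_secretsmanager` is an
    /// assumption):
    ///
    /// ```no_run
    /// // Request a 20-character password with no punctuation characters.
    /// let input = aws_sdk_secretsmanager::input::GetRandomPasswordInput::builder()
    ///     .password_length(20)
    ///     .exclude_punctuation(true)
    ///     .build()
    ///     .expect("valid GetRandomPasswordInput");
    /// ```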
pub fn builder() -> crate::input::get_random_password_input::Builder {
crate::input::get_random_password_input::Builder::default()
}
}
/// See [`GetResourcePolicyInput`](crate::input::GetResourcePolicyInput)
pub mod get_resource_policy_input {
/// A builder for [`GetResourcePolicyInput`](crate::input::GetResourcePolicyInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN or name of the secret to retrieve the attached resource-based policy for.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret to retrieve the attached resource-based policy for.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// Consumes the builder and constructs a [`GetResourcePolicyInput`](crate::input::GetResourcePolicyInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetResourcePolicyInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetResourcePolicyInput {
secret_id: self.secret_id,
})
}
}
}
#[doc(hidden)]
pub type GetResourcePolicyInputOperationOutputAlias = crate::operation::GetResourcePolicy;
#[doc(hidden)]
pub type GetResourcePolicyInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl GetResourcePolicyInput {
/// Consumes the builder and constructs an Operation<[`GetResourcePolicy`](crate::operation::GetResourcePolicy)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetResourcePolicy,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::GetResourcePolicyInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetResourcePolicyInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.GetResourcePolicy",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_get_resource_policy(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetResourcePolicy::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetResourcePolicy",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`GetResourcePolicyInput`](crate::input::GetResourcePolicyInput)
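    ///
    /// A minimal usage sketch (the crate name `aws_sdk_secretsmanager` and the
    /// secret name are assumptions):
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::GetResourcePolicyInput::builder()
    ///     .secret_id("MyAppCredentials") // placeholder ARN or name
    ///     .build()
    ///     .expect("valid GetResourcePolicyInput");
    /// ```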
pub fn builder() -> crate::input::get_resource_policy_input::Builder {
crate::input::get_resource_policy_input::Builder::default()
}
}
/// See [`GetSecretValueInput`](crate::input::GetSecretValueInput)
pub mod get_secret_value_input {
/// A builder for [`GetSecretValueInput`](crate::input::GetSecretValueInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) version_id: std::option::Option<std::string::String>,
pub(crate) version_stage: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN or name of the secret to retrieve.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret to retrieve.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// <p>The unique identifier of the version of the secret to retrieve. If you include both this parameter and <code>VersionStage</code>, the two parameters must refer to the same secret version. If you don't specify either a <code>VersionStage</code> or <code>VersionId</code>, then Secrets Manager returns the <code>AWSCURRENT</code> version.</p>
/// <p>This value is typically a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value with 32 hexadecimal digits.</p>
pub fn version_id(mut self, input: impl Into<std::string::String>) -> Self {
self.version_id = Some(input.into());
self
}
/// <p>The unique identifier of the version of the secret to retrieve. If you include both this parameter and <code>VersionStage</code>, the two parameters must refer to the same secret version. If you don't specify either a <code>VersionStage</code> or <code>VersionId</code>, then Secrets Manager returns the <code>AWSCURRENT</code> version.</p>
/// <p>This value is typically a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value with 32 hexadecimal digits.</p>
pub fn set_version_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.version_id = input;
self
}
/// <p>The staging label of the version of the secret to retrieve. </p>
/// <p>Secrets Manager uses staging labels to keep track of different versions during the rotation process. If you include both this parameter and <code>VersionId</code>, the two parameters must refer to the same secret version. If you don't specify either a <code>VersionStage</code> or <code>VersionId</code>, Secrets Manager returns the <code>AWSCURRENT</code> version.</p>
pub fn version_stage(mut self, input: impl Into<std::string::String>) -> Self {
self.version_stage = Some(input.into());
self
}
/// <p>The staging label of the version of the secret to retrieve. </p>
/// <p>Secrets Manager uses staging labels to keep track of different versions during the rotation process. If you include both this parameter and <code>VersionId</code>, the two parameters must refer to the same secret version. If you don't specify either a <code>VersionStage</code> or <code>VersionId</code>, Secrets Manager returns the <code>AWSCURRENT</code> version.</p>
pub fn set_version_stage(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.version_stage = input;
self
}
/// Consumes the builder and constructs a [`GetSecretValueInput`](crate::input::GetSecretValueInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetSecretValueInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::GetSecretValueInput {
secret_id: self.secret_id,
version_id: self.version_id,
version_stage: self.version_stage,
})
}
}
}
#[doc(hidden)]
pub type GetSecretValueInputOperationOutputAlias = crate::operation::GetSecretValue;
#[doc(hidden)]
pub type GetSecretValueInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl GetSecretValueInput {
/// Consumes the builder and constructs an Operation<[`GetSecretValue`](crate::operation::GetSecretValue)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::GetSecretValue,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::GetSecretValueInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::GetSecretValueInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.GetSecretValue",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_get_secret_value(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::GetSecretValue::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"GetSecretValue",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`GetSecretValueInput`](crate::input::GetSecretValueInput)
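    ///
    /// A minimal usage sketch (the crate name `aws_sdk_secretsmanager` and the
    /// secret name are assumptions):
    ///
    /// ```no_run
    /// // Omitting VersionId and VersionStage retrieves the AWSCURRENT version.
    /// let input = aws_sdk_secretsmanager::input::GetSecretValueInput::builder()
    ///     .secret_id("MyAppCredentials") // placeholder ARN or name
    ///     .build()
    ///     .expect("valid GetSecretValueInput");
    /// ```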
pub fn builder() -> crate::input::get_secret_value_input::Builder {
crate::input::get_secret_value_input::Builder::default()
}
}
/// See [`ListSecretsInput`](crate::input::ListSecretsInput)
pub mod list_secrets_input {
/// A builder for [`ListSecretsInput`](crate::input::ListSecretsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) max_results: std::option::Option<i32>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) filters: std::option::Option<std::vec::Vec<crate::model::Filter>>,
pub(crate) sort_order: std::option::Option<crate::model::SortOrderType>,
}
impl Builder {
/// <p>The number of results to include in the response.</p>
        /// <p>If there are more results available, Secrets Manager includes <code>NextToken</code> in the response. To get the next results, call <code>ListSecrets</code> again with the value from <code>NextToken</code>.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
/// <p>The number of results to include in the response.</p>
        /// <p>If there are more results available, Secrets Manager includes <code>NextToken</code> in the response. To get the next results, call <code>ListSecrets</code> again with the value from <code>NextToken</code>.</p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// <p>A token that indicates where the output should continue from, if a previous call did not show all results. To get the next results, call <code>ListSecrets</code> again with this value.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p>A token that indicates where the output should continue from, if a previous call did not show all results. To get the next results, call <code>ListSecrets</code> again with this value.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// Appends an item to `filters`.
///
/// To override the contents of this collection use [`set_filters`](Self::set_filters).
///
/// <p>The filters to apply to the list of secrets.</p>
pub fn filters(mut self, input: crate::model::Filter) -> Self {
let mut v = self.filters.unwrap_or_default();
v.push(input);
self.filters = Some(v);
self
}
/// <p>The filters to apply to the list of secrets.</p>
pub fn set_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Filter>>,
) -> Self {
self.filters = input;
self
}
/// <p>Lists secrets in the requested order. </p>
pub fn sort_order(mut self, input: crate::model::SortOrderType) -> Self {
self.sort_order = Some(input);
self
}
/// <p>Lists secrets in the requested order. </p>
pub fn set_sort_order(
mut self,
input: std::option::Option<crate::model::SortOrderType>,
) -> Self {
self.sort_order = input;
self
}
/// Consumes the builder and constructs a [`ListSecretsInput`](crate::input::ListSecretsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ListSecretsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::ListSecretsInput {
max_results: self.max_results,
next_token: self.next_token,
filters: self.filters,
sort_order: self.sort_order,
})
}
}
}
#[doc(hidden)]
pub type ListSecretsInputOperationOutputAlias = crate::operation::ListSecrets;
#[doc(hidden)]
pub type ListSecretsInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl ListSecretsInput {
/// Consumes the builder and constructs an Operation<[`ListSecrets`](crate::operation::ListSecrets)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::ListSecrets,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::ListSecretsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::ListSecretsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.ListSecrets",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_list_secrets(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::ListSecrets::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"ListSecrets",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`ListSecretsInput`](crate::input::ListSecretsInput)
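    ///
    /// A minimal, hypothetical sketch (the package name
    /// `aws_sdk_secretsmanager` and the page size are assumptions for
    /// illustration):
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::ListSecretsInput::builder()
    ///     // Ask for at most 10 secrets per page; feed the returned
    ///     // `NextToken` back via `.next_token(...)` to fetch the next page.
    ///     .max_results(10)
    ///     .build()
    ///     .expect("all fields are optional, so this should build");
    /// ```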
pub fn builder() -> crate::input::list_secrets_input::Builder {
crate::input::list_secrets_input::Builder::default()
}
}
/// See [`ListSecretVersionIdsInput`](crate::input::ListSecretVersionIdsInput)
pub mod list_secret_version_ids_input {
/// A builder for [`ListSecretVersionIdsInput`](crate::input::ListSecretVersionIdsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) include_deprecated: std::option::Option<bool>,
}
impl Builder {
/// <p>The ARN or name of the secret whose versions you want to list.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret whose versions you want to list.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// <p>The number of results to include in the response.</p>
/// <p>If there are more results available, in the response, Secrets Manager includes <code>NextToken</code>. To get the next results, call <code>ListSecretVersionIds</code> again with the value from <code>NextToken</code>. </p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
/// <p>The number of results to include in the response.</p>
/// <p>If there are more results available, in the response, Secrets Manager includes <code>NextToken</code>. To get the next results, call <code>ListSecretVersionIds</code> again with the value from <code>NextToken</code>. </p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// <p>A token that indicates where the output should continue from, if a previous call did not show all results. To get the next results, call <code>ListSecretVersionIds</code> again with this value.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
/// <p>A token that indicates where the output should continue from, if a previous call did not show all results. To get the next results, call <code>ListSecretVersionIds</code> again with this value.</p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>Specifies whether to include versions of secrets that don't have any staging labels attached to them. Versions without staging labels are considered deprecated and are subject to deletion by Secrets Manager.</p>
pub fn include_deprecated(mut self, input: bool) -> Self {
self.include_deprecated = Some(input);
self
}
/// <p>Specifies whether to include versions of secrets that don't have any staging labels attached to them. Versions without staging labels are considered deprecated and are subject to deletion by Secrets Manager.</p>
pub fn set_include_deprecated(mut self, input: std::option::Option<bool>) -> Self {
self.include_deprecated = input;
self
}
/// Consumes the builder and constructs a [`ListSecretVersionIdsInput`](crate::input::ListSecretVersionIdsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ListSecretVersionIdsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::ListSecretVersionIdsInput {
secret_id: self.secret_id,
max_results: self.max_results,
next_token: self.next_token,
include_deprecated: self.include_deprecated,
})
}
}
}
#[doc(hidden)]
pub type ListSecretVersionIdsInputOperationOutputAlias = crate::operation::ListSecretVersionIds;
#[doc(hidden)]
pub type ListSecretVersionIdsInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl ListSecretVersionIdsInput {
/// Consumes the builder and constructs an Operation<[`ListSecretVersionIds`](crate::operation::ListSecretVersionIds)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::ListSecretVersionIds,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::ListSecretVersionIdsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::ListSecretVersionIdsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.ListSecretVersionIds",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_list_secret_version_ids(
&self,
)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::ListSecretVersionIds::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"ListSecretVersionIds",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`ListSecretVersionIdsInput`](crate::input::ListSecretVersionIdsInput)
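    ///
    /// A minimal, hypothetical sketch (the package name and field values
    /// are illustrative assumptions):
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::ListSecretVersionIdsInput::builder()
    ///     .secret_id("MyTestSecret")
    ///     // Also list versions that no longer carry any staging label.
    ///     .include_deprecated(true)
    ///     .build()
    ///     .expect("a SecretId alone should produce a valid input");
    /// ```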
pub fn builder() -> crate::input::list_secret_version_ids_input::Builder {
crate::input::list_secret_version_ids_input::Builder::default()
}
}
/// See [`PutResourcePolicyInput`](crate::input::PutResourcePolicyInput)
pub mod put_resource_policy_input {
/// A builder for [`PutResourcePolicyInput`](crate::input::PutResourcePolicyInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) resource_policy: std::option::Option<std::string::String>,
pub(crate) block_public_policy: std::option::Option<bool>,
}
impl Builder {
/// <p>The ARN or name of the secret to attach the resource-based policy.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret to attach the resource-based policy.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// <p>A JSON-formatted string for an Amazon Web Services resource-based policy. For example policies, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html">Permissions policy examples</a>.</p>
pub fn resource_policy(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_policy = Some(input.into());
self
}
/// <p>A JSON-formatted string for an Amazon Web Services resource-based policy. For example policies, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html">Permissions policy examples</a>.</p>
pub fn set_resource_policy(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.resource_policy = input;
self
}
/// <p>Specifies whether to block resource-based policies that allow broad access to the secret. By default, Secrets Manager blocks policies that allow broad access, for example those that use a wildcard for the principal.</p>
pub fn block_public_policy(mut self, input: bool) -> Self {
self.block_public_policy = Some(input);
self
}
/// <p>Specifies whether to block resource-based policies that allow broad access to the secret. By default, Secrets Manager blocks policies that allow broad access, for example those that use a wildcard for the principal.</p>
pub fn set_block_public_policy(mut self, input: std::option::Option<bool>) -> Self {
self.block_public_policy = input;
self
}
/// Consumes the builder and constructs a [`PutResourcePolicyInput`](crate::input::PutResourcePolicyInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::PutResourcePolicyInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::PutResourcePolicyInput {
secret_id: self.secret_id,
resource_policy: self.resource_policy,
block_public_policy: self.block_public_policy,
})
}
}
}
#[doc(hidden)]
pub type PutResourcePolicyInputOperationOutputAlias = crate::operation::PutResourcePolicy;
#[doc(hidden)]
pub type PutResourcePolicyInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl PutResourcePolicyInput {
/// Consumes the builder and constructs an Operation<[`PutResourcePolicy`](crate::operation::PutResourcePolicy)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::PutResourcePolicy,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::PutResourcePolicyInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::PutResourcePolicyInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.PutResourcePolicy",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_put_resource_policy(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::PutResourcePolicy::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"PutResourcePolicy",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`PutResourcePolicyInput`](crate::input::PutResourcePolicyInput)
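    ///
    /// A minimal, hypothetical sketch (the package name, secret name, and
    /// empty policy document are illustrative placeholders; a real call
    /// needs a JSON resource policy as described above):
    ///
    /// ```no_run
    /// let policy_json = "{}"; // placeholder for a real JSON policy document
    /// let input = aws_sdk_secretsmanager::input::PutResourcePolicyInput::builder()
    ///     .secret_id("MyTestSecret")
    ///     .resource_policy(policy_json)
    ///     // Keep Secrets Manager's default protection against overly
    ///     // broad (for example, wildcard-principal) policies.
    ///     .block_public_policy(true)
    ///     .build()
    ///     .expect("should build");
    /// ```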
pub fn builder() -> crate::input::put_resource_policy_input::Builder {
crate::input::put_resource_policy_input::Builder::default()
}
}
/// See [`PutSecretValueInput`](crate::input::PutSecretValueInput)
pub mod put_secret_value_input {
/// A builder for [`PutSecretValueInput`](crate::input::PutSecretValueInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) client_request_token: std::option::Option<std::string::String>,
pub(crate) secret_binary: std::option::Option<aws_smithy_types::Blob>,
pub(crate) secret_string: std::option::Option<std::string::String>,
pub(crate) version_stages: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The ARN or name of the secret to add a new version to.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
/// <p>If the secret doesn't already exist, use <code>CreateSecret</code> instead.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret to add a new version to.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
/// <p>If the secret doesn't already exist, use <code>CreateSecret</code> instead.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// <p>A unique identifier for the new version of the secret. </p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty because they generate a random UUID for you. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for new versions and include that value in the request. </p>
/// </note>
/// <p>This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function processing. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness within the specified secret. </p>
/// <ul>
/// <li> <p>If the <code>ClientRequestToken</code> value isn't already associated with a version of the secret then a new version of the secret is created. </p> </li>
/// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> or <code>SecretBinary</code> values are the same as those in the request then the request is ignored. The operation is idempotent. </p> </li>
/// <li> <p>If a version with this value already exists and the version of the <code>SecretString</code> and <code>SecretBinary</code> values are different from those in the request, then the request fails because you can't modify a secret version. You can only create new versions to store new secret values.</p> </li>
/// </ul>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub fn client_request_token(mut self, input: impl Into<std::string::String>) -> Self {
self.client_request_token = Some(input.into());
self
}
/// <p>A unique identifier for the new version of the secret. </p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty because they generate a random UUID for you. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for new versions and include that value in the request. </p>
/// </note>
/// <p>This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function processing. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness within the specified secret. </p>
/// <ul>
/// <li> <p>If the <code>ClientRequestToken</code> value isn't already associated with a version of the secret then a new version of the secret is created. </p> </li>
/// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> or <code>SecretBinary</code> values are the same as those in the request then the request is ignored. The operation is idempotent. </p> </li>
/// <li> <p>If a version with this value already exists and the version of the <code>SecretString</code> and <code>SecretBinary</code> values are different from those in the request, then the request fails because you can't modify a secret version. You can only create new versions to store new secret values.</p> </li>
/// </ul>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub fn set_client_request_token(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.client_request_token = input;
self
}
/// <p>The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter. </p>
/// <p>You must include <code>SecretBinary</code> or <code>SecretString</code>, but not both.</p>
/// <p>You can't access this value from the Secrets Manager console.</p>
pub fn secret_binary(mut self, input: aws_smithy_types::Blob) -> Self {
self.secret_binary = Some(input);
self
}
/// <p>The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter. </p>
/// <p>You must include <code>SecretBinary</code> or <code>SecretString</code>, but not both.</p>
/// <p>You can't access this value from the Secrets Manager console.</p>
pub fn set_secret_binary(
mut self,
input: std::option::Option<aws_smithy_types::Blob>,
) -> Self {
self.secret_binary = input;
self
}
/// <p>The text to encrypt and store in the new version of the secret. </p>
/// <p>You must include <code>SecretBinary</code> or <code>SecretString</code>, but not both.</p>
/// <p>We recommend you create the secret string as JSON key/value pairs, as shown in the example.</p>
pub fn secret_string(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_string = Some(input.into());
self
}
/// <p>The text to encrypt and store in the new version of the secret. </p>
/// <p>You must include <code>SecretBinary</code> or <code>SecretString</code>, but not both.</p>
/// <p>We recommend you create the secret string as JSON key/value pairs, as shown in the example.</p>
pub fn set_secret_string(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.secret_string = input;
self
}
/// Appends an item to `version_stages`.
///
/// To override the contents of this collection use [`set_version_stages`](Self::set_version_stages).
///
/// <p>A list of staging labels to attach to this version of the secret. Secrets Manager uses staging labels to track versions of a secret through the rotation process.</p>
/// <p>If you specify a staging label that's already associated with a different version of the same secret, then Secrets Manager removes the label from the other version and attaches it to this version. If you specify <code>AWSCURRENT</code>, and it is already attached to another version, then Secrets Manager also moves the staging label <code>AWSPREVIOUS</code> to the version that <code>AWSCURRENT</code> was removed from.</p>
/// <p>If you don't include <code>VersionStages</code>, then Secrets Manager automatically moves the staging label <code>AWSCURRENT</code> to this version.</p>
pub fn version_stages(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.version_stages.unwrap_or_default();
v.push(input.into());
self.version_stages = Some(v);
self
}
/// <p>A list of staging labels to attach to this version of the secret. Secrets Manager uses staging labels to track versions of a secret through the rotation process.</p>
/// <p>If you specify a staging label that's already associated with a different version of the same secret, then Secrets Manager removes the label from the other version and attaches it to this version. If you specify <code>AWSCURRENT</code>, and it is already attached to another version, then Secrets Manager also moves the staging label <code>AWSPREVIOUS</code> to the version that <code>AWSCURRENT</code> was removed from.</p>
/// <p>If you don't include <code>VersionStages</code>, then Secrets Manager automatically moves the staging label <code>AWSCURRENT</code> to this version.</p>
pub fn set_version_stages(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.version_stages = input;
self
}
/// Consumes the builder and constructs a [`PutSecretValueInput`](crate::input::PutSecretValueInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::PutSecretValueInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::PutSecretValueInput {
secret_id: self.secret_id,
client_request_token: self.client_request_token,
secret_binary: self.secret_binary,
secret_string: self.secret_string,
version_stages: self.version_stages,
})
}
}
}
#[doc(hidden)]
pub type PutSecretValueInputOperationOutputAlias = crate::operation::PutSecretValue;
#[doc(hidden)]
pub type PutSecretValueInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl PutSecretValueInput {
/// Consumes the builder and constructs an Operation<[`PutSecretValue`](crate::operation::PutSecretValue)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
mut self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::PutSecretValue,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
if self.client_request_token.is_none() {
self.client_request_token = Some(_config.make_token.make_idempotency_token());
}
let mut request = {
fn uri_base(
_input: &crate::input::PutSecretValueInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::PutSecretValueInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.PutSecretValue",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_put_secret_value(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::PutSecretValue::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"PutSecretValue",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`PutSecretValueInput`](crate::input::PutSecretValueInput)
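    ///
    /// A minimal, hypothetical sketch (the package name and field values
    /// are illustrative). `ClientRequestToken` is left unset here because
    /// `make_operation` fills in an idempotency token when it is absent:
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::PutSecretValueInput::builder()
    ///     .secret_id("MyTestSecret")
    ///     // Set exactly one of SecretString or SecretBinary, not both.
    ///     .secret_string(r#"{"user":"example","password":"EXAMPLE-PASSWORD"}"#)
    ///     .build()
    ///     .expect("should build");
    /// ```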
pub fn builder() -> crate::input::put_secret_value_input::Builder {
crate::input::put_secret_value_input::Builder::default()
}
}
/// See [`RemoveRegionsFromReplicationInput`](crate::input::RemoveRegionsFromReplicationInput)
pub mod remove_regions_from_replication_input {
/// A builder for [`RemoveRegionsFromReplicationInput`](crate::input::RemoveRegionsFromReplicationInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) remove_replica_regions: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The ARN or name of the secret.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// Appends an item to `remove_replica_regions`.
///
/// To override the contents of this collection use [`set_remove_replica_regions`](Self::set_remove_replica_regions).
///
/// <p>The Regions of the replicas to remove.</p>
pub fn remove_replica_regions(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.remove_replica_regions.unwrap_or_default();
v.push(input.into());
self.remove_replica_regions = Some(v);
self
}
/// <p>The Regions of the replicas to remove.</p>
pub fn set_remove_replica_regions(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.remove_replica_regions = input;
self
}
/// Consumes the builder and constructs a [`RemoveRegionsFromReplicationInput`](crate::input::RemoveRegionsFromReplicationInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::RemoveRegionsFromReplicationInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::RemoveRegionsFromReplicationInput {
secret_id: self.secret_id,
remove_replica_regions: self.remove_replica_regions,
})
}
}
}
#[doc(hidden)]
pub type RemoveRegionsFromReplicationInputOperationOutputAlias =
crate::operation::RemoveRegionsFromReplication;
#[doc(hidden)]
pub type RemoveRegionsFromReplicationInputOperationRetryAlias =
aws_http::retry::AwsErrorRetryPolicy;
impl RemoveRegionsFromReplicationInput {
/// Consumes the builder and constructs an Operation<[`RemoveRegionsFromReplication`](crate::operation::RemoveRegionsFromReplication)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::RemoveRegionsFromReplication,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::RemoveRegionsFromReplicationInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::RemoveRegionsFromReplicationInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.RemoveRegionsFromReplication",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
        #[allow(clippy::useless_conversion)]
        let body = aws_smithy_http::body::SdkBody::from(
            crate::operation_ser::serialize_operation_crate_operation_remove_regions_from_replication(
                &self,
            )?,
        );
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::RemoveRegionsFromReplication::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"RemoveRegionsFromReplication",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`RemoveRegionsFromReplicationInput`](crate::input::RemoveRegionsFromReplicationInput)
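    ///
    /// A minimal, hypothetical sketch (the package name, secret name, and
    /// Region are illustrative placeholders):
    ///
    /// ```no_run
    /// let input =
    ///     aws_sdk_secretsmanager::input::RemoveRegionsFromReplicationInput::builder()
    ///         .secret_id("MyTestSecret")
    ///         // `remove_replica_regions` appends one Region per call.
    ///         .remove_replica_regions("us-west-2")
    ///         .build()
    ///         .expect("should build");
    /// ```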
pub fn builder() -> crate::input::remove_regions_from_replication_input::Builder {
crate::input::remove_regions_from_replication_input::Builder::default()
}
}
/// See [`ReplicateSecretToRegionsInput`](crate::input::ReplicateSecretToRegionsInput)
pub mod replicate_secret_to_regions_input {
/// A builder for [`ReplicateSecretToRegionsInput`](crate::input::ReplicateSecretToRegionsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) add_replica_regions:
std::option::Option<std::vec::Vec<crate::model::ReplicaRegionType>>,
pub(crate) force_overwrite_replica_secret: std::option::Option<bool>,
}
impl Builder {
/// <p>The ARN or name of the secret to replicate.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret to replicate.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// Appends an item to `add_replica_regions`.
///
/// To override the contents of this collection use [`set_add_replica_regions`](Self::set_add_replica_regions).
///
/// <p>A list of Regions in which to replicate the secret.</p>
pub fn add_replica_regions(mut self, input: crate::model::ReplicaRegionType) -> Self {
let mut v = self.add_replica_regions.unwrap_or_default();
v.push(input);
self.add_replica_regions = Some(v);
self
}
/// <p>A list of Regions in which to replicate the secret.</p>
pub fn set_add_replica_regions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::ReplicaRegionType>>,
) -> Self {
self.add_replica_regions = input;
self
}
/// <p>Specifies whether to overwrite a secret with the same name in the destination Region.</p>
pub fn force_overwrite_replica_secret(mut self, input: bool) -> Self {
self.force_overwrite_replica_secret = Some(input);
self
}
/// <p>Specifies whether to overwrite a secret with the same name in the destination Region.</p>
pub fn set_force_overwrite_replica_secret(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.force_overwrite_replica_secret = input;
self
}
/// Consumes the builder and constructs a [`ReplicateSecretToRegionsInput`](crate::input::ReplicateSecretToRegionsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ReplicateSecretToRegionsInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::ReplicateSecretToRegionsInput {
secret_id: self.secret_id,
add_replica_regions: self.add_replica_regions,
force_overwrite_replica_secret: self
.force_overwrite_replica_secret
.unwrap_or_default(),
})
}
}
}
#[doc(hidden)]
pub type ReplicateSecretToRegionsInputOperationOutputAlias =
crate::operation::ReplicateSecretToRegions;
#[doc(hidden)]
pub type ReplicateSecretToRegionsInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl ReplicateSecretToRegionsInput {
/// Consumes the builder and constructs an Operation<[`ReplicateSecretToRegions`](crate::operation::ReplicateSecretToRegions)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::ReplicateSecretToRegions,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::ReplicateSecretToRegionsInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::ReplicateSecretToRegionsInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.ReplicateSecretToRegions",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_replicate_secret_to_regions(
&self,
)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::ReplicateSecretToRegions::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"ReplicateSecretToRegions",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`ReplicateSecretToRegionsInput`](crate::input::ReplicateSecretToRegionsInput)
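    ///
    /// A minimal, hypothetical sketch (the package name and values are
    /// illustrative; it assumes the generated `ReplicaRegionType` model
    /// builder, whose `build()` is infallible):
    ///
    /// ```no_run
    /// let replica = aws_sdk_secretsmanager::model::ReplicaRegionType::builder()
    ///     .region("us-west-2")
    ///     .build();
    /// let input =
    ///     aws_sdk_secretsmanager::input::ReplicateSecretToRegionsInput::builder()
    ///         .secret_id("MyTestSecret")
    ///         // `add_replica_regions` appends one replica per call.
    ///         .add_replica_regions(replica)
    ///         .build()
    ///         .expect("should build");
    /// ```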
pub fn builder() -> crate::input::replicate_secret_to_regions_input::Builder {
crate::input::replicate_secret_to_regions_input::Builder::default()
}
}
/// See [`RestoreSecretInput`](crate::input::RestoreSecretInput)
pub mod restore_secret_input {
/// A builder for [`RestoreSecretInput`](crate::input::RestoreSecretInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN or name of the secret to restore.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret to restore.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// Consumes the builder and constructs a [`RestoreSecretInput`](crate::input::RestoreSecretInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::RestoreSecretInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::RestoreSecretInput {
secret_id: self.secret_id,
})
}
}
}
#[doc(hidden)]
pub type RestoreSecretInputOperationOutputAlias = crate::operation::RestoreSecret;
#[doc(hidden)]
pub type RestoreSecretInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl RestoreSecretInput {
/// Consumes the builder and constructs an Operation<[`RestoreSecret`](crate::operation::RestoreSecret)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::RestoreSecret,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::RestoreSecretInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::RestoreSecretInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.RestoreSecret",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_restore_secret(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::RestoreSecret::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"RestoreSecret",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`RestoreSecretInput`](crate::input::RestoreSecretInput)
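    ///
    /// A minimal, hypothetical sketch (the package name and secret name
    /// are illustrative placeholders):
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::RestoreSecretInput::builder()
    ///     .secret_id("MyTestSecret")
    ///     .build()
    ///     .expect("a SecretId alone should produce a valid input");
    /// ```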
pub fn builder() -> crate::input::restore_secret_input::Builder {
crate::input::restore_secret_input::Builder::default()
}
}
/// See [`RotateSecretInput`](crate::input::RotateSecretInput)
pub mod rotate_secret_input {
/// A builder for [`RotateSecretInput`](crate::input::RotateSecretInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) client_request_token: std::option::Option<std::string::String>,
pub(crate) rotation_lambda_arn: std::option::Option<std::string::String>,
pub(crate) rotation_rules: std::option::Option<crate::model::RotationRulesType>,
pub(crate) rotate_immediately: std::option::Option<bool>,
}
impl Builder {
/// <p>The ARN or name of the secret to rotate.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret to rotate.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// <p>A unique identifier for the new version of the secret that helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during rotation. This value becomes the <code>VersionId</code> of the new version.</p>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for new versions and include that value in the request.</p>
/// <p>You only need to specify this value if you implement your own retry logic and you want to ensure that Secrets Manager doesn't attempt to create a secret version twice. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness within the specified secret. </p>
pub fn client_request_token(mut self, input: impl Into<std::string::String>) -> Self {
self.client_request_token = Some(input.into());
self
}
/// <p>A unique identifier for the new version of the secret that helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during rotation. This value becomes the <code>VersionId</code> of the new version.</p>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for new versions and include that value in the request.</p>
/// <p>You only need to specify this value if you implement your own retry logic and you want to ensure that Secrets Manager doesn't attempt to create a secret version twice. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness within the specified secret. </p>
pub fn set_client_request_token(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.client_request_token = input;
self
}
/// <p>The ARN of the Lambda rotation function that can rotate the secret.</p>
pub fn rotation_lambda_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.rotation_lambda_arn = Some(input.into());
self
}
/// <p>The ARN of the Lambda rotation function that can rotate the secret.</p>
pub fn set_rotation_lambda_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.rotation_lambda_arn = input;
self
}
/// <p>A structure that defines the rotation configuration for this secret.</p>
pub fn rotation_rules(mut self, input: crate::model::RotationRulesType) -> Self {
self.rotation_rules = Some(input);
self
}
/// <p>A structure that defines the rotation configuration for this secret.</p>
pub fn set_rotation_rules(
mut self,
input: std::option::Option<crate::model::RotationRulesType>,
) -> Self {
self.rotation_rules = input;
self
}
/// <p>Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in <code>RotateSecretRequest$RotationRules</code>.</p>
/// <p>If you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html"> <code>testSecret</code> step</a> of the Lambda rotation function. The test creates an <code>AWSPENDING</code> version of the secret and then removes it.</p>
/// <p>If you don't specify this value, then by default, Secrets Manager rotates the secret immediately.</p>
pub fn rotate_immediately(mut self, input: bool) -> Self {
self.rotate_immediately = Some(input);
self
}
/// <p>Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in <code>RotateSecretRequest$RotationRules</code>.</p>
/// <p>If you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html"> <code>testSecret</code> step</a> of the Lambda rotation function. The test creates an <code>AWSPENDING</code> version of the secret and then removes it.</p>
/// <p>If you don't specify this value, then by default, Secrets Manager rotates the secret immediately.</p>
pub fn set_rotate_immediately(mut self, input: std::option::Option<bool>) -> Self {
self.rotate_immediately = input;
self
}
/// Consumes the builder and constructs a [`RotateSecretInput`](crate::input::RotateSecretInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::RotateSecretInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::RotateSecretInput {
secret_id: self.secret_id,
client_request_token: self.client_request_token,
rotation_lambda_arn: self.rotation_lambda_arn,
rotation_rules: self.rotation_rules,
rotate_immediately: self.rotate_immediately,
})
}
}
}
#[doc(hidden)]
pub type RotateSecretInputOperationOutputAlias = crate::operation::RotateSecret;
#[doc(hidden)]
pub type RotateSecretInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl RotateSecretInput {
/// Consumes the builder and constructs an Operation<[`RotateSecret`](crate::operation::RotateSecret)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
mut self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::RotateSecret,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
if self.client_request_token.is_none() {
self.client_request_token = Some(_config.make_token.make_idempotency_token());
}
let mut request = {
fn uri_base(
_input: &crate::input::RotateSecretInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::RotateSecretInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.RotateSecret",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_rotate_secret(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::RotateSecret::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"RotateSecret",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`RotateSecretInput`](crate::input::RotateSecretInput)
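    /// # Example
    ///
    /// A minimal sketch of assembling this input by hand; it assumes the crate
    /// is consumed as `aws_sdk_secretsmanager`, and the secret name and Lambda
    /// ARN below are placeholders rather than real resources:
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::RotateSecretInput::builder()
    ///     // The secret to rotate, by name or ARN.
    ///     .secret_id("MyTestSecret")
    ///     // The Lambda function that performs the rotation steps.
    ///     .rotation_lambda_arn("arn:aws:lambda:us-west-2:123456789012:function:MyRotationFn")
    ///     // Rotate now instead of waiting for the next scheduled window.
    ///     .rotate_immediately(true)
    ///     .build()
    ///     .expect("a RotateSecretInput with these fields should build");
    /// ```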
pub fn builder() -> crate::input::rotate_secret_input::Builder {
crate::input::rotate_secret_input::Builder::default()
}
}
/// See [`StopReplicationToReplicaInput`](crate::input::StopReplicationToReplicaInput)
pub mod stop_replication_to_replica_input {
/// A builder for [`StopReplicationToReplicaInput`](crate::input::StopReplicationToReplicaInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN of the primary secret. </p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN of the primary secret. </p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// Consumes the builder and constructs a [`StopReplicationToReplicaInput`](crate::input::StopReplicationToReplicaInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::StopReplicationToReplicaInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::StopReplicationToReplicaInput {
secret_id: self.secret_id,
})
}
}
}
#[doc(hidden)]
pub type StopReplicationToReplicaInputOperationOutputAlias =
crate::operation::StopReplicationToReplica;
#[doc(hidden)]
pub type StopReplicationToReplicaInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl StopReplicationToReplicaInput {
/// Consumes the builder and constructs an Operation<[`StopReplicationToReplica`](crate::operation::StopReplicationToReplica)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::StopReplicationToReplica,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::StopReplicationToReplicaInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::StopReplicationToReplicaInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.StopReplicationToReplica",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_stop_replication_to_replica(
&self,
)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::StopReplicationToReplica::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"StopReplicationToReplica",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`StopReplicationToReplicaInput`](crate::input::StopReplicationToReplicaInput)
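    /// # Example
    ///
    /// A minimal sketch, assuming the crate is consumed as
    /// `aws_sdk_secretsmanager`; the ARN below is a placeholder for the
    /// primary secret:
    ///
    /// ```no_run
    /// let input =
    ///     aws_sdk_secretsmanager::input::StopReplicationToReplicaInput::builder()
    ///         .secret_id("arn:aws:secretsmanager:us-west-2:123456789012:secret:MyTestSecret-a1b2c3")
    ///         .build()
    ///         .expect("secret_id is the only field, so this should build");
    /// ```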
pub fn builder() -> crate::input::stop_replication_to_replica_input::Builder {
crate::input::stop_replication_to_replica_input::Builder::default()
}
}
/// See [`TagResourceInput`](crate::input::TagResourceInput)
pub mod tag_resource_input {
/// A builder for [`TagResourceInput`](crate::input::TagResourceInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl Builder {
/// <p>The identifier for the secret to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The identifier for the secret to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// Appends an item to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>The tags to attach to the secret as a JSON text string argument. Each element in the list consists of a <code>Key</code> and a <code>Value</code>.</p>
/// <p>For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For more information, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html">Specifying parameter values for the Amazon Web Services CLI</a> in the Amazon Web Services CLI User Guide.</p>
pub fn tags(mut self, input: crate::model::Tag) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input);
self.tags = Some(v);
self
}
/// <p>The tags to attach to the secret as a JSON text string argument. Each element in the list consists of a <code>Key</code> and a <code>Value</code>.</p>
/// <p>For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For more information, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html">Specifying parameter values for the Amazon Web Services CLI</a> in the Amazon Web Services CLI User Guide.</p>
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`TagResourceInput`](crate::input::TagResourceInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::TagResourceInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::TagResourceInput {
secret_id: self.secret_id,
tags: self.tags,
})
}
}
}
#[doc(hidden)]
pub type TagResourceInputOperationOutputAlias = crate::operation::TagResource;
#[doc(hidden)]
pub type TagResourceInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl TagResourceInput {
/// Consumes the builder and constructs an Operation<[`TagResource`](crate::operation::TagResource)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::TagResource,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::TagResourceInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::TagResourceInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.TagResource",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_tag_resource(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::TagResource::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"TagResource",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`TagResourceInput`](crate::input::TagResourceInput)
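    /// # Example
    ///
    /// A hedged sketch of attaching two tags, assuming the crate is consumed
    /// as `aws_sdk_secretsmanager`; the identifiers and tag values are
    /// placeholders:
    ///
    /// ```no_run
    /// use aws_sdk_secretsmanager::model::Tag;
    ///
    /// let input = aws_sdk_secretsmanager::input::TagResourceInput::builder()
    ///     .secret_id("MyTestSecret")
    ///     // Each `tags` call appends one element; use `set_tags` to replace
    ///     // the whole collection instead.
    ///     .tags(Tag::builder().key("Project").value("Alpha").build())
    ///     .tags(Tag::builder().key("Stage").value("Prod").build())
    ///     .build()
    ///     .expect("a TagResourceInput with these fields should build");
    /// ```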
pub fn builder() -> crate::input::tag_resource_input::Builder {
crate::input::tag_resource_input::Builder::default()
}
}
/// See [`UntagResourceInput`](crate::input::UntagResourceInput)
pub mod untag_resource_input {
/// A builder for [`UntagResourceInput`](crate::input::UntagResourceInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) tag_keys: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// Appends an item to `tag_keys`.
///
/// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys).
///
/// <p>A list of tag key names to remove from the secret. You don't specify the value. Both the key and its associated value are removed.</p>
/// <p>This parameter requires a JSON text string argument.</p>
/// <p>For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For more information, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html">Specifying parameter values for the Amazon Web Services CLI</a> in the Amazon Web Services CLI User Guide.</p>
pub fn tag_keys(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.tag_keys.unwrap_or_default();
v.push(input.into());
self.tag_keys = Some(v);
self
}
/// <p>A list of tag key names to remove from the secret. You don't specify the value. Both the key and its associated value are removed.</p>
/// <p>This parameter requires a JSON text string argument.</p>
/// <p>For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For more information, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html">Specifying parameter values for the Amazon Web Services CLI</a> in the Amazon Web Services CLI User Guide.</p>
pub fn set_tag_keys(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.tag_keys = input;
self
}
        /// Consumes the builder and constructs an [`UntagResourceInput`](crate::input::UntagResourceInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::UntagResourceInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::UntagResourceInput {
secret_id: self.secret_id,
tag_keys: self.tag_keys,
})
}
}
}
#[doc(hidden)]
pub type UntagResourceInputOperationOutputAlias = crate::operation::UntagResource;
#[doc(hidden)]
pub type UntagResourceInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl UntagResourceInput {
/// Consumes the builder and constructs an Operation<[`UntagResource`](crate::operation::UntagResource)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::UntagResource,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::UntagResourceInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::UntagResourceInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.UntagResource",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_untag_resource(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::UntagResource::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"UntagResource",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`UntagResourceInput`](crate::input::UntagResourceInput)
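    /// # Example
    ///
    /// A minimal sketch, assuming the crate is consumed as
    /// `aws_sdk_secretsmanager`; the secret name and tag keys are placeholders:
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::UntagResourceInput::builder()
    ///     .secret_id("MyTestSecret")
    ///     // Each `tag_keys` call appends one key to remove.
    ///     .tag_keys("Project")
    ///     .tag_keys("Stage")
    ///     .build()
    ///     .expect("an UntagResourceInput with these fields should build");
    /// ```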
pub fn builder() -> crate::input::untag_resource_input::Builder {
crate::input::untag_resource_input::Builder::default()
}
}
/// See [`UpdateSecretInput`](crate::input::UpdateSecretInput)
pub mod update_secret_input {
/// A builder for [`UpdateSecretInput`](crate::input::UpdateSecretInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) client_request_token: std::option::Option<std::string::String>,
pub(crate) description: std::option::Option<std::string::String>,
pub(crate) kms_key_id: std::option::Option<std::string::String>,
pub(crate) secret_binary: std::option::Option<aws_smithy_types::Blob>,
pub(crate) secret_string: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// <p>If you include <code>SecretString</code> or <code>SecretBinary</code>, then Secrets Manager creates a new version for the secret, and this parameter specifies the unique identifier for the new version.</p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for the new version and include the value in the request.</p>
/// </note>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub fn client_request_token(mut self, input: impl Into<std::string::String>) -> Self {
self.client_request_token = Some(input.into());
self
}
/// <p>If you include <code>SecretString</code> or <code>SecretBinary</code>, then Secrets Manager creates a new version for the secret, and this parameter specifies the unique identifier for the new version.</p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for the new version and include the value in the request.</p>
/// </note>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub fn set_client_request_token(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.client_request_token = input;
self
}
/// <p>The description of the secret.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
/// <p>The description of the secret.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
        /// <p>The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels <code>AWSCURRENT</code>, <code>AWSPENDING</code>, or <code>AWSPREVIOUS</code>. For more information about versions and staging labels, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version">Concepts: Version</a>.</p> <important>
/// <p>You can only use the Amazon Web Services managed key <code>aws/secretsmanager</code> if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. The user making the call must have permissions to both the secret and the KMS key in their respective accounts.</p>
/// </important>
pub fn kms_key_id(mut self, input: impl Into<std::string::String>) -> Self {
self.kms_key_id = Some(input.into());
self
}
        /// <p>The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels <code>AWSCURRENT</code>, <code>AWSPENDING</code>, or <code>AWSPREVIOUS</code>. For more information about versions and staging labels, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version">Concepts: Version</a>.</p> <important>
/// <p>You can only use the Amazon Web Services managed key <code>aws/secretsmanager</code> if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. The user making the call must have permissions to both the secret and the KMS key in their respective accounts.</p>
/// </important>
pub fn set_kms_key_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.kms_key_id = input;
self
}
/// <p>The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. </p>
/// <p>Either <code>SecretBinary</code> or <code>SecretString</code> must have a value, but not both.</p>
/// <p>You can't access this parameter in the Secrets Manager console.</p>
pub fn secret_binary(mut self, input: aws_smithy_types::Blob) -> Self {
self.secret_binary = Some(input);
self
}
/// <p>The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. </p>
/// <p>Either <code>SecretBinary</code> or <code>SecretString</code> must have a value, but not both.</p>
/// <p>You can't access this parameter in the Secrets Manager console.</p>
pub fn set_secret_binary(
mut self,
input: std::option::Option<aws_smithy_types::Blob>,
) -> Self {
self.secret_binary = input;
self
}
/// <p>The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. </p>
/// <p>Either <code>SecretBinary</code> or <code>SecretString</code> must have a value, but not both. </p>
pub fn secret_string(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_string = Some(input.into());
self
}
/// <p>The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. </p>
/// <p>Either <code>SecretBinary</code> or <code>SecretString</code> must have a value, but not both. </p>
pub fn set_secret_string(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.secret_string = input;
self
}
        /// Consumes the builder and constructs an [`UpdateSecretInput`](crate::input::UpdateSecretInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::UpdateSecretInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::UpdateSecretInput {
secret_id: self.secret_id,
client_request_token: self.client_request_token,
description: self.description,
kms_key_id: self.kms_key_id,
secret_binary: self.secret_binary,
secret_string: self.secret_string,
})
}
}
}
#[doc(hidden)]
pub type UpdateSecretInputOperationOutputAlias = crate::operation::UpdateSecret;
#[doc(hidden)]
pub type UpdateSecretInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl UpdateSecretInput {
/// Consumes the builder and constructs an Operation<[`UpdateSecret`](crate::operation::UpdateSecret)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
mut self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::UpdateSecret,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
        // Default the client request token to a freshly generated idempotency
        // token when the caller has not supplied one; this is why this
        // `make_operation` takes `mut self`.
        if self.client_request_token.is_none() {
self.client_request_token = Some(_config.make_token.make_idempotency_token());
}
let mut request = {
fn uri_base(
_input: &crate::input::UpdateSecretInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::UpdateSecretInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.UpdateSecret",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_update_secret(&self)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::UpdateSecret::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"UpdateSecret",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`UpdateSecretInput`](crate::input::UpdateSecretInput)
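    /// # Example
    ///
    /// A hedged sketch of updating a secret's description and value, assuming
    /// the crate is consumed as `aws_sdk_secretsmanager`; the credentials shown
    /// are placeholders. `client_request_token` is omitted because
    /// `make_operation` fills in an idempotency token when none is set:
    ///
    /// ```no_run
    /// let input = aws_sdk_secretsmanager::input::UpdateSecretInput::builder()
    ///     .secret_id("MyTestSecret")
    ///     .description("Rotated database credentials")
    ///     // Provide either `secret_string` or `secret_binary`, not both.
    ///     .secret_string(r#"{"username":"admin","password":"EXAMPLE-PASSWORD"}"#)
    ///     .build()
    ///     .expect("an UpdateSecretInput with these fields should build");
    /// ```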
pub fn builder() -> crate::input::update_secret_input::Builder {
crate::input::update_secret_input::Builder::default()
}
}
/// See [`UpdateSecretVersionStageInput`](crate::input::UpdateSecretVersionStageInput)
pub mod update_secret_version_stage_input {
/// A builder for [`UpdateSecretVersionStageInput`](crate::input::UpdateSecretVersionStageInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) version_stage: std::option::Option<std::string::String>,
pub(crate) remove_from_version_id: std::option::Option<std::string::String>,
pub(crate) move_to_version_id: std::option::Option<std::string::String>,
}
impl Builder {
        /// <p>The ARN or the name of the secret with the version and staging labels to modify.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
        /// <p>The ARN or the name of the secret with the version and staging labels to modify.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// <p>The staging label to add to this version.</p>
pub fn version_stage(mut self, input: impl Into<std::string::String>) -> Self {
self.version_stage = Some(input.into());
self
}
/// <p>The staging label to add to this version.</p>
pub fn set_version_stage(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.version_stage = input;
self
}
/// <p>The ID of the version that the staging label is to be removed from. If the staging label you are trying to attach to one version is already attached to a different version, then you must include this parameter and specify the version that the label is to be removed from. If the label is attached and you either do not specify this parameter, or the version ID does not match, then the operation fails.</p>
pub fn remove_from_version_id(mut self, input: impl Into<std::string::String>) -> Self {
self.remove_from_version_id = Some(input.into());
self
}
/// <p>The ID of the version that the staging label is to be removed from. If the staging label you are trying to attach to one version is already attached to a different version, then you must include this parameter and specify the version that the label is to be removed from. If the label is attached and you either do not specify this parameter, or the version ID does not match, then the operation fails.</p>
pub fn set_remove_from_version_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.remove_from_version_id = input;
self
}
        /// <p>The ID of the version to add the staging label to. To remove a label from a version, do not specify this parameter.</p>
/// <p>If the staging label is already attached to a different version of the secret, then you must also specify the <code>RemoveFromVersionId</code> parameter. </p>
pub fn move_to_version_id(mut self, input: impl Into<std::string::String>) -> Self {
self.move_to_version_id = Some(input.into());
self
}
        /// <p>The ID of the version to add the staging label to. To remove a label from a version, do not specify this parameter.</p>
/// <p>If the staging label is already attached to a different version of the secret, then you must also specify the <code>RemoveFromVersionId</code> parameter. </p>
pub fn set_move_to_version_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.move_to_version_id = input;
self
}
        /// Consumes the builder and constructs an [`UpdateSecretVersionStageInput`](crate::input::UpdateSecretVersionStageInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::UpdateSecretVersionStageInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::UpdateSecretVersionStageInput {
secret_id: self.secret_id,
version_stage: self.version_stage,
remove_from_version_id: self.remove_from_version_id,
move_to_version_id: self.move_to_version_id,
})
}
}
}
#[doc(hidden)]
pub type UpdateSecretVersionStageInputOperationOutputAlias =
crate::operation::UpdateSecretVersionStage;
#[doc(hidden)]
pub type UpdateSecretVersionStageInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl UpdateSecretVersionStageInput {
/// Consumes the builder and constructs an Operation<[`UpdateSecretVersionStage`](crate::operation::UpdateSecretVersionStage)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::UpdateSecretVersionStage,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::UpdateSecretVersionStageInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::UpdateSecretVersionStageInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.UpdateSecretVersionStage",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_update_secret_version_stage(
&self,
)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::UpdateSecretVersionStage::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"UpdateSecretVersionStage",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`UpdateSecretVersionStageInput`](crate::input::UpdateSecretVersionStageInput)
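    /// # Example
    ///
    /// A hedged sketch of moving the `AWSCURRENT` staging label between two
    /// versions, assuming the crate is consumed as `aws_sdk_secretsmanager`;
    /// the version IDs are placeholders:
    ///
    /// ```no_run
    /// let input =
    ///     aws_sdk_secretsmanager::input::UpdateSecretVersionStageInput::builder()
    ///         .secret_id("MyTestSecret")
    ///         .version_stage("AWSCURRENT")
    ///         // Detach the label from the old version...
    ///         .remove_from_version_id("EXAMPLE1-90ab-cdef-fedc-ba987EXAMPLE")
    ///         // ...and attach it to the new one.
    ///         .move_to_version_id("EXAMPLE2-90ab-cdef-fedc-ba987EXAMPLE")
    ///         .build()
    ///         .expect("an UpdateSecretVersionStageInput with these fields should build");
    /// ```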
pub fn builder() -> crate::input::update_secret_version_stage_input::Builder {
crate::input::update_secret_version_stage_input::Builder::default()
}
}
/// See [`ValidateResourcePolicyInput`](crate::input::ValidateResourcePolicyInput)
pub mod validate_resource_policy_input {
/// A builder for [`ValidateResourcePolicyInput`](crate::input::ValidateResourcePolicyInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) secret_id: std::option::Option<std::string::String>,
pub(crate) resource_policy: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>This field is reserved for internal use.</p>
pub fn secret_id(mut self, input: impl Into<std::string::String>) -> Self {
self.secret_id = Some(input.into());
self
}
/// <p>This field is reserved for internal use.</p>
pub fn set_secret_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.secret_id = input;
self
}
/// <p>A JSON-formatted string that contains an Amazon Web Services resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For example policies, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html">Permissions policy examples</a>.</p>
pub fn resource_policy(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_policy = Some(input.into());
self
}
/// <p>A JSON-formatted string that contains an Amazon Web Services resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For example policies, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html">Permissions policy examples</a>.</p>
pub fn set_resource_policy(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.resource_policy = input;
self
}
/// Consumes the builder and constructs a [`ValidateResourcePolicyInput`](crate::input::ValidateResourcePolicyInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ValidateResourcePolicyInput,
aws_smithy_http::operation::BuildError,
> {
Ok(crate::input::ValidateResourcePolicyInput {
secret_id: self.secret_id,
resource_policy: self.resource_policy,
})
}
}
}
#[doc(hidden)]
pub type ValidateResourcePolicyInputOperationOutputAlias = crate::operation::ValidateResourcePolicy;
#[doc(hidden)]
pub type ValidateResourcePolicyInputOperationRetryAlias = aws_http::retry::AwsErrorRetryPolicy;
impl ValidateResourcePolicyInput {
/// Consumes the builder and constructs an Operation<[`ValidateResourcePolicy`](crate::operation::ValidateResourcePolicy)>
#[allow(unused_mut)]
#[allow(clippy::let_and_return)]
#[allow(clippy::needless_borrow)]
pub async fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
aws_smithy_http::operation::Operation<
crate::operation::ValidateResourcePolicy,
aws_http::retry::AwsErrorRetryPolicy,
>,
aws_smithy_http::operation::BuildError,
> {
let mut request = {
fn uri_base(
_input: &crate::input::ValidateResourcePolicyInput,
output: &mut String,
) -> Result<(), aws_smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
input: &crate::input::ValidateResourcePolicyInput,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError>
{
let mut uri = String::new();
uri_base(input, &mut uri)?;
Ok(builder.method("POST").uri(uri))
}
let mut builder = update_http_builder(&self, http::request::Builder::new())?;
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::CONTENT_TYPE,
"application/x-amz-json-1.1",
);
builder = aws_smithy_http::header::set_request_header_if_absent(
builder,
http::header::HeaderName::from_static("x-amz-target"),
"secretsmanager.ValidateResourcePolicy",
);
builder
};
let mut properties = aws_smithy_http::property_bag::SharedPropertyBag::new();
#[allow(clippy::useless_conversion)]
let body = aws_smithy_http::body::SdkBody::from(
crate::operation_ser::serialize_operation_crate_operation_validate_resource_policy(
&self,
)?,
);
if let Some(content_length) = body.content_length() {
request = aws_smithy_http::header::set_request_header_if_absent(
request,
http::header::CONTENT_LENGTH,
content_length,
);
}
let request = request.body(body).expect("should be valid request");
let mut request = aws_smithy_http::operation::Request::from_parts(request, properties);
let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment(
aws_types::os_shim_internal::Env::real(),
crate::API_METADATA.clone(),
);
if let Some(app_name) = _config.app_name() {
user_agent = user_agent.with_app_name(app_name.clone());
}
request.properties_mut().insert(user_agent);
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.properties_mut().insert(signing_config);
request
.properties_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.properties_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.properties_mut().insert(region.clone());
}
aws_http::auth::set_provider(
&mut request.properties_mut(),
_config.credentials_provider.clone(),
);
let op = aws_smithy_http::operation::Operation::new(
request,
crate::operation::ValidateResourcePolicy::new(),
)
.with_metadata(aws_smithy_http::operation::Metadata::new(
"ValidateResourcePolicy",
"secretsmanager",
));
let op = op.with_retry_policy(aws_http::retry::AwsErrorRetryPolicy::new());
Ok(op)
}
/// Creates a new builder-style object to manufacture [`ValidateResourcePolicyInput`](crate::input::ValidateResourcePolicyInput)
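    /// # Example
    ///
    /// A hedged sketch of validating a resource policy document, assuming the
    /// crate is consumed as `aws_sdk_secretsmanager`; the account ID in the
    /// policy is a placeholder:
    ///
    /// ```no_run
    /// let policy = r#"{
    ///     "Version": "2012-10-17",
    ///     "Statement": [{
    ///         "Effect": "Allow",
    ///         "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
    ///         "Action": "secretsmanager:GetSecretValue",
    ///         "Resource": "*"
    ///     }]
    /// }"#;
    ///
    /// let input =
    ///     aws_sdk_secretsmanager::input::ValidateResourcePolicyInput::builder()
    ///         .resource_policy(policy)
    ///         .build()
    ///         .expect("a ValidateResourcePolicyInput with these fields should build");
    /// ```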
pub fn builder() -> crate::input::validate_resource_policy_input::Builder {
crate::input::validate_resource_policy_input::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ValidateResourcePolicyInput {
/// <p>This field is reserved for internal use.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>A JSON-formatted string that contains an Amazon Web Services resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For example policies, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html">Permissions policy examples</a>.</p>
pub resource_policy: std::option::Option<std::string::String>,
}
impl ValidateResourcePolicyInput {
/// <p>This field is reserved for internal use.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>A JSON-formatted string that contains an Amazon Web Services resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For example policies, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html">Permissions policy examples</a>.</p>
pub fn resource_policy(&self) -> std::option::Option<&str> {
self.resource_policy.as_deref()
}
}
impl std::fmt::Debug for ValidateResourcePolicyInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ValidateResourcePolicyInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("resource_policy", &self.resource_policy);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateSecretVersionStageInput {
    /// <p>The ARN or the name of the secret with the version and staging labels to modify.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>The staging label to add to this version.</p>
pub version_stage: std::option::Option<std::string::String>,
/// <p>The ID of the version that the staging label is to be removed from. If the staging label you are trying to attach to one version is already attached to a different version, then you must include this parameter and specify the version that the label is to be removed from. If the label is attached and you either do not specify this parameter, or the version ID does not match, then the operation fails.</p>
pub remove_from_version_id: std::option::Option<std::string::String>,
    /// <p>The ID of the version to add the staging label to. To remove a label from a version, do not specify this parameter.</p>
/// <p>If the staging label is already attached to a different version of the secret, then you must also specify the <code>RemoveFromVersionId</code> parameter. </p>
pub move_to_version_id: std::option::Option<std::string::String>,
}
impl UpdateSecretVersionStageInput {
    /// <p>The ARN or the name of the secret with the version and staging labels to modify.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>The staging label to add to this version.</p>
pub fn version_stage(&self) -> std::option::Option<&str> {
self.version_stage.as_deref()
}
/// <p>The ID of the version that the staging label is to be removed from. If the staging label you are trying to attach to one version is already attached to a different version, then you must include this parameter and specify the version that the label is to be removed from. If the label is attached and you either do not specify this parameter, or the version ID does not match, then the operation fails.</p>
pub fn remove_from_version_id(&self) -> std::option::Option<&str> {
self.remove_from_version_id.as_deref()
}
    /// <p>The ID of the version to add the staging label to. To remove a label from a version, do not specify this parameter.</p>
/// <p>If the staging label is already attached to a different version of the secret, then you must also specify the <code>RemoveFromVersionId</code> parameter. </p>
pub fn move_to_version_id(&self) -> std::option::Option<&str> {
self.move_to_version_id.as_deref()
}
}
impl std::fmt::Debug for UpdateSecretVersionStageInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UpdateSecretVersionStageInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("version_stage", &self.version_stage);
formatter.field("remove_from_version_id", &self.remove_from_version_id);
formatter.field("move_to_version_id", &self.move_to_version_id);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateSecretInput {
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>If you include <code>SecretString</code> or <code>SecretBinary</code>, then Secrets Manager creates a new version for the secret, and this parameter specifies the unique identifier for the new version.</p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for the new version and include the value in the request.</p>
/// </note>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub client_request_token: std::option::Option<std::string::String>,
/// <p>The description of the secret.</p>
pub description: std::option::Option<std::string::String>,
    /// <p>The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels <code>AWSCURRENT</code>, <code>AWSPENDING</code>, or <code>AWSPREVIOUS</code>. For more information about versions and staging labels, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version">Concepts: Version</a>.</p> <important>
/// <p>You can only use the Amazon Web Services managed key <code>aws/secretsmanager</code> if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. The user making the call must have permissions to both the secret and the KMS key in their respective accounts.</p>
/// </important>
pub kms_key_id: std::option::Option<std::string::String>,
/// <p>The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. </p>
/// <p>Either <code>SecretBinary</code> or <code>SecretString</code> must have a value, but not both.</p>
/// <p>You can't access this parameter in the Secrets Manager console.</p>
pub secret_binary: std::option::Option<aws_smithy_types::Blob>,
/// <p>The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. </p>
/// <p>Either <code>SecretBinary</code> or <code>SecretString</code> must have a value, but not both. </p>
pub secret_string: std::option::Option<std::string::String>,
}
impl UpdateSecretInput {
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>If you include <code>SecretString</code> or <code>SecretBinary</code>, then Secrets Manager creates a new version for the secret, and this parameter specifies the unique identifier for the new version.</p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for the new version and include the value in the request.</p>
/// </note>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub fn client_request_token(&self) -> std::option::Option<&str> {
self.client_request_token.as_deref()
}
/// <p>The description of the secret.</p>
pub fn description(&self) -> std::option::Option<&str> {
self.description.as_deref()
}
    /// <p>The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels <code>AWSCURRENT</code>, <code>AWSPENDING</code>, or <code>AWSPREVIOUS</code>. For more information about versions and staging labels, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/getting-started.html#term_version">Concepts: Version</a>.</p> <important>
/// <p>You can only use the Amazon Web Services managed key <code>aws/secretsmanager</code> if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. The user making the call must have permissions to both the secret and the KMS key in their respective accounts.</p>
/// </important>
pub fn kms_key_id(&self) -> std::option::Option<&str> {
self.kms_key_id.as_deref()
}
/// <p>The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. </p>
/// <p>Either <code>SecretBinary</code> or <code>SecretString</code> must have a value, but not both.</p>
/// <p>You can't access this parameter in the Secrets Manager console.</p>
pub fn secret_binary(&self) -> std::option::Option<&aws_smithy_types::Blob> {
self.secret_binary.as_ref()
}
/// <p>The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. </p>
/// <p>Either <code>SecretBinary</code> or <code>SecretString</code> must have a value, but not both. </p>
pub fn secret_string(&self) -> std::option::Option<&str> {
self.secret_string.as_deref()
}
}
impl std::fmt::Debug for UpdateSecretInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UpdateSecretInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("client_request_token", &self.client_request_token);
formatter.field("description", &self.description);
formatter.field("kms_key_id", &self.kms_key_id);
formatter.field("secret_binary", &"*** Sensitive Data Redacted ***");
formatter.field("secret_string", &"*** Sensitive Data Redacted ***");
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
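/// # Example
///
/// A minimal sketch of building this input by hand. It assumes the
/// `builder()` constructor and fluent setters that smithy-rs generates for
/// input types (defined elsewhere in this file); the secret name and tag
/// keys are hypothetical.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// let input = aws_sdk_secretsmanager::input::UntagResourceInput::builder()
///     .secret_id("prod/db/credentials")
///     // Each call appends one key; the associated value is removed with it.
///     .tag_keys("environment")
///     .tag_keys("CostCenter")
///     .build()?;
/// # Ok(())
/// # }
/// ```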
pub struct UntagResourceInput {
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>A list of tag key names to remove from the secret. You don't specify the value. Both the key and its associated value are removed.</p>
/// <p>This parameter requires a JSON text string argument.</p>
/// <p>For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For more information, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html">Specifying parameter values for the Amazon Web Services CLI</a> in the Amazon Web Services CLI User Guide.</p>
pub tag_keys: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl UntagResourceInput {
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>A list of tag key names to remove from the secret. You don't specify the value. Both the key and its associated value are removed.</p>
/// <p>This parameter requires a JSON text string argument.</p>
/// <p>For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For more information, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html">Specifying parameter values for the Amazon Web Services CLI</a> in the Amazon Web Services CLI User Guide.</p>
pub fn tag_keys(&self) -> std::option::Option<&[std::string::String]> {
self.tag_keys.as_deref()
}
}
impl std::fmt::Debug for UntagResourceInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UntagResourceInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("tag_keys", &self.tag_keys);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
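/// # Example
///
/// An illustrative sketch, assuming the generated `builder()` constructors
/// on this input type and on `crate::model::Tag`; names and values are
/// placeholders.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_secretsmanager::model::Tag;
/// let input = aws_sdk_secretsmanager::input::TagResourceInput::builder()
///     .secret_id("prod/db/credentials")
///     // Each call to `tags` appends one key/value pair.
///     .tags(Tag::builder().key("environment").value("production").build())
///     .build()?;
/// # Ok(())
/// # }
/// ```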
pub struct TagResourceInput {
/// <p>The identifier for the secret to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>The tags to attach to the secret as a JSON text string argument. Each element in the list consists of a <code>Key</code> and a <code>Value</code>.</p>
/// <p>For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For more information, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html">Specifying parameter values for the Amazon Web Services CLI</a> in the Amazon Web Services CLI User Guide.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl TagResourceInput {
/// <p>The identifier for the secret to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>The tags to attach to the secret as a JSON text string argument. Each element in the list consists of a <code>Key</code> and a <code>Value</code>.</p>
/// <p>For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For more information, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html">Specifying parameter values for the Amazon Web Services CLI</a> in the Amazon Web Services CLI User Guide.</p>
pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
self.tags.as_deref()
}
}
impl std::fmt::Debug for TagResourceInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("TagResourceInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("tags", &self.tags);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StopReplicationToReplicaInput {
/// <p>The ARN of the primary secret. </p>
pub secret_id: std::option::Option<std::string::String>,
}
impl StopReplicationToReplicaInput {
/// <p>The ARN of the primary secret. </p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
}
impl std::fmt::Debug for StopReplicationToReplicaInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StopReplicationToReplicaInput");
formatter.field("secret_id", &self.secret_id);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
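/// # Example
///
/// A sketch of a rotation request, assuming the generated `builder()`
/// constructors on this input type and on `crate::model::RotationRulesType`;
/// the Lambda ARN and secret name are placeholders.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_secretsmanager::model::RotationRulesType;
/// let input = aws_sdk_secretsmanager::input::RotateSecretInput::builder()
///     .secret_id("prod/db/credentials")
///     .rotation_lambda_arn("arn:aws:lambda:us-east-1:123456789012:function:MyRotator")
///     .rotation_rules(RotationRulesType::builder().automatically_after_days(30).build())
///     // Test the rotation configuration instead of rotating right away.
///     .rotate_immediately(false)
///     .build()?;
/// # Ok(())
/// # }
/// ```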
pub struct RotateSecretInput {
/// <p>The ARN or name of the secret to rotate.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>A unique identifier for the new version of the secret that helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during rotation. This value becomes the <code>VersionId</code> of the new version.</p>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for new versions and include that value in the request.</p>
/// <p>You only need to specify this value if you implement your own retry logic and you want to ensure that Secrets Manager doesn't attempt to create a secret version twice. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness within the specified secret. </p>
pub client_request_token: std::option::Option<std::string::String>,
/// <p>The ARN of the Lambda rotation function that can rotate the secret.</p>
pub rotation_lambda_arn: std::option::Option<std::string::String>,
/// <p>A structure that defines the rotation configuration for this secret.</p>
pub rotation_rules: std::option::Option<crate::model::RotationRulesType>,
/// <p>Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in <code>RotateSecretRequest$RotationRules</code>.</p>
/// <p>If you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html"> <code>testSecret</code> step</a> of the Lambda rotation function. The test creates an <code>AWSPENDING</code> version of the secret and then removes it.</p>
/// <p>If you don't specify this value, then by default, Secrets Manager rotates the secret immediately.</p>
pub rotate_immediately: std::option::Option<bool>,
}
impl RotateSecretInput {
/// <p>The ARN or name of the secret to rotate.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>A unique identifier for the new version of the secret that helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during rotation. This value becomes the <code>VersionId</code> of the new version.</p>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for new versions and include that value in the request.</p>
/// <p>You only need to specify this value if you implement your own retry logic and you want to ensure that Secrets Manager doesn't attempt to create a secret version twice. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness within the specified secret. </p>
pub fn client_request_token(&self) -> std::option::Option<&str> {
self.client_request_token.as_deref()
}
/// <p>The ARN of the Lambda rotation function that can rotate the secret.</p>
pub fn rotation_lambda_arn(&self) -> std::option::Option<&str> {
self.rotation_lambda_arn.as_deref()
}
/// <p>A structure that defines the rotation configuration for this secret.</p>
pub fn rotation_rules(&self) -> std::option::Option<&crate::model::RotationRulesType> {
self.rotation_rules.as_ref()
}
/// <p>Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in <code>RotateSecretRequest$RotationRules</code>.</p>
/// <p>If you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html"> <code>testSecret</code> step</a> of the Lambda rotation function. The test creates an <code>AWSPENDING</code> version of the secret and then removes it.</p>
/// <p>If you don't specify this value, then by default, Secrets Manager rotates the secret immediately.</p>
pub fn rotate_immediately(&self) -> std::option::Option<bool> {
self.rotate_immediately
}
}
impl std::fmt::Debug for RotateSecretInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RotateSecretInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("client_request_token", &self.client_request_token);
formatter.field("rotation_lambda_arn", &self.rotation_lambda_arn);
formatter.field("rotation_rules", &self.rotation_rules);
formatter.field("rotate_immediately", &self.rotate_immediately);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RestoreSecretInput {
/// <p>The ARN or name of the secret to restore.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
}
impl RestoreSecretInput {
/// <p>The ARN or name of the secret to restore.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
}
impl std::fmt::Debug for RestoreSecretInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RestoreSecretInput");
formatter.field("secret_id", &self.secret_id);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
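/// # Example
///
/// A sketch of replicating a secret to one Region, assuming the generated
/// `builder()` constructors on this input type and on
/// `crate::model::ReplicaRegionType`.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_secretsmanager::model::ReplicaRegionType;
/// let input =
///     aws_sdk_secretsmanager::input::ReplicateSecretToRegionsInput::builder()
///         .secret_id("prod/db/credentials")
///         // One Region per call; a replica-specific KMS key is optional.
///         .add_replica_regions(ReplicaRegionType::builder().region("eu-west-1").build())
///         .force_overwrite_replica_secret(false)
///         .build()?;
/// # Ok(())
/// # }
/// ```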
pub struct ReplicateSecretToRegionsInput {
/// <p>The ARN or name of the secret to replicate.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>A list of Regions in which to replicate the secret.</p>
pub add_replica_regions: std::option::Option<std::vec::Vec<crate::model::ReplicaRegionType>>,
/// <p>Specifies whether to overwrite a secret with the same name in the destination Region.</p>
pub force_overwrite_replica_secret: bool,
}
impl ReplicateSecretToRegionsInput {
/// <p>The ARN or name of the secret to replicate.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>A list of Regions in which to replicate the secret.</p>
pub fn add_replica_regions(&self) -> std::option::Option<&[crate::model::ReplicaRegionType]> {
self.add_replica_regions.as_deref()
}
/// <p>Specifies whether to overwrite a secret with the same name in the destination Region.</p>
pub fn force_overwrite_replica_secret(&self) -> bool {
self.force_overwrite_replica_secret
}
}
impl std::fmt::Debug for ReplicateSecretToRegionsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ReplicateSecretToRegionsInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("add_replica_regions", &self.add_replica_regions);
formatter.field(
"force_overwrite_replica_secret",
&self.force_overwrite_replica_secret,
);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RemoveRegionsFromReplicationInput {
/// <p>The ARN or name of the secret.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>The Regions of the replicas to remove.</p>
pub remove_replica_regions: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl RemoveRegionsFromReplicationInput {
/// <p>The ARN or name of the secret.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>The Regions of the replicas to remove.</p>
pub fn remove_replica_regions(&self) -> std::option::Option<&[std::string::String]> {
self.remove_replica_regions.as_deref()
}
}
impl std::fmt::Debug for RemoveRegionsFromReplicationInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("RemoveRegionsFromReplicationInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("remove_replica_regions", &self.remove_replica_regions);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
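/// # Example
///
/// A sketch of storing a new string version, assuming the `builder()`
/// constructor that smithy-rs generates for this input type; the secret
/// payload is a placeholder.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// let input = aws_sdk_secretsmanager::input::PutSecretValueInput::builder()
///     .secret_id("prod/db/credentials")
///     // JSON key/value pairs are the recommended shape for the value.
///     .secret_string(r#"{"username":"admin","password":"EXAMPLE-PASSWORD"}"#)
///     // Optional: attach a staging label; AWSCURRENT moves here by default.
///     .version_stages("AWSCURRENT")
///     .build()?;
/// # Ok(())
/// # }
/// ```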
pub struct PutSecretValueInput {
/// <p>The ARN or name of the secret to add a new version to.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
/// <p>If the secret doesn't already exist, use <code>CreateSecret</code> instead.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>A unique identifier for the new version of the secret. </p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty because they generate a random UUID for you. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for new versions and include that value in the request. </p>
/// </note>
/// <p>This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function processing. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness within the specified secret. </p>
/// <ul>
    /// <li> <p>If the <code>ClientRequestToken</code> value isn't already associated with a version of the secret, then a new version of the secret is created. </p> </li>
    /// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> or <code>SecretBinary</code> values are the same as those in the request, then the request is ignored. The operation is idempotent. </p> </li>
    /// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> and <code>SecretBinary</code> values are different from those in the request, then the request fails because you can't modify a secret version. You can only create new versions to store new secret values.</p> </li>
/// </ul>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub client_request_token: std::option::Option<std::string::String>,
/// <p>The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter. </p>
/// <p>You must include <code>SecretBinary</code> or <code>SecretString</code>, but not both.</p>
/// <p>You can't access this value from the Secrets Manager console.</p>
pub secret_binary: std::option::Option<aws_smithy_types::Blob>,
/// <p>The text to encrypt and store in the new version of the secret. </p>
/// <p>You must include <code>SecretBinary</code> or <code>SecretString</code>, but not both.</p>
/// <p>We recommend you create the secret string as JSON key/value pairs, as shown in the example.</p>
pub secret_string: std::option::Option<std::string::String>,
/// <p>A list of staging labels to attach to this version of the secret. Secrets Manager uses staging labels to track versions of a secret through the rotation process.</p>
/// <p>If you specify a staging label that's already associated with a different version of the same secret, then Secrets Manager removes the label from the other version and attaches it to this version. If you specify <code>AWSCURRENT</code>, and it is already attached to another version, then Secrets Manager also moves the staging label <code>AWSPREVIOUS</code> to the version that <code>AWSCURRENT</code> was removed from.</p>
/// <p>If you don't include <code>VersionStages</code>, then Secrets Manager automatically moves the staging label <code>AWSCURRENT</code> to this version.</p>
pub version_stages: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl PutSecretValueInput {
/// <p>The ARN or name of the secret to add a new version to.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
/// <p>If the secret doesn't already exist, use <code>CreateSecret</code> instead.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>A unique identifier for the new version of the secret. </p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty because they generate a random UUID for you. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for new versions and include that value in the request. </p>
/// </note>
/// <p>This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function processing. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness within the specified secret. </p>
/// <ul>
    /// <li> <p>If the <code>ClientRequestToken</code> value isn't already associated with a version of the secret, then a new version of the secret is created. </p> </li>
    /// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> or <code>SecretBinary</code> values are the same as those in the request, then the request is ignored. The operation is idempotent. </p> </li>
    /// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> and <code>SecretBinary</code> values are different from those in the request, then the request fails because you can't modify a secret version. You can only create new versions to store new secret values.</p> </li>
/// </ul>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub fn client_request_token(&self) -> std::option::Option<&str> {
self.client_request_token.as_deref()
}
/// <p>The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter. </p>
/// <p>You must include <code>SecretBinary</code> or <code>SecretString</code>, but not both.</p>
/// <p>You can't access this value from the Secrets Manager console.</p>
pub fn secret_binary(&self) -> std::option::Option<&aws_smithy_types::Blob> {
self.secret_binary.as_ref()
}
/// <p>The text to encrypt and store in the new version of the secret. </p>
/// <p>You must include <code>SecretBinary</code> or <code>SecretString</code>, but not both.</p>
/// <p>We recommend you create the secret string as JSON key/value pairs, as shown in the example.</p>
pub fn secret_string(&self) -> std::option::Option<&str> {
self.secret_string.as_deref()
}
/// <p>A list of staging labels to attach to this version of the secret. Secrets Manager uses staging labels to track versions of a secret through the rotation process.</p>
/// <p>If you specify a staging label that's already associated with a different version of the same secret, then Secrets Manager removes the label from the other version and attaches it to this version. If you specify <code>AWSCURRENT</code>, and it is already attached to another version, then Secrets Manager also moves the staging label <code>AWSPREVIOUS</code> to the version that <code>AWSCURRENT</code> was removed from.</p>
/// <p>If you don't include <code>VersionStages</code>, then Secrets Manager automatically moves the staging label <code>AWSCURRENT</code> to this version.</p>
pub fn version_stages(&self) -> std::option::Option<&[std::string::String]> {
self.version_stages.as_deref()
}
}
impl std::fmt::Debug for PutSecretValueInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PutSecretValueInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("client_request_token", &self.client_request_token);
formatter.field("secret_binary", &"*** Sensitive Data Redacted ***");
formatter.field("secret_string", &"*** Sensitive Data Redacted ***");
formatter.field("version_stages", &self.version_stages);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
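/// # Example
///
/// A sketch of attaching a resource-based policy, assuming the generated
/// `builder()` constructor; the account ID and role in the policy are
/// placeholders.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// let policy = r#"{
///   "Version": "2012-10-17",
///   "Statement": [{
///     "Effect": "Allow",
///     "Principal": {"AWS": "arn:aws:iam::123456789012:role/ExampleRole"},
///     "Action": "secretsmanager:GetSecretValue",
///     "Resource": "*"
///   }]
/// }"#;
/// let input = aws_sdk_secretsmanager::input::PutResourcePolicyInput::builder()
///     .secret_id("prod/db/credentials")
///     .resource_policy(policy)
///     // Keep the default protection against overly broad policies.
///     .block_public_policy(true)
///     .build()?;
/// # Ok(())
/// # }
/// ```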
pub struct PutResourcePolicyInput {
/// <p>The ARN or name of the secret to attach the resource-based policy.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>A JSON-formatted string for an Amazon Web Services resource-based policy. For example policies, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html">Permissions policy examples</a>.</p>
pub resource_policy: std::option::Option<std::string::String>,
/// <p>Specifies whether to block resource-based policies that allow broad access to the secret. By default, Secrets Manager blocks policies that allow broad access, for example those that use a wildcard for the principal.</p>
pub block_public_policy: std::option::Option<bool>,
}
impl PutResourcePolicyInput {
/// <p>The ARN or name of the secret to attach the resource-based policy.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>A JSON-formatted string for an Amazon Web Services resource-based policy. For example policies, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html">Permissions policy examples</a>.</p>
pub fn resource_policy(&self) -> std::option::Option<&str> {
self.resource_policy.as_deref()
}
/// <p>Specifies whether to block resource-based policies that allow broad access to the secret. By default, Secrets Manager blocks policies that allow broad access, for example those that use a wildcard for the principal.</p>
pub fn block_public_policy(&self) -> std::option::Option<bool> {
self.block_public_policy
}
}
impl std::fmt::Debug for PutResourcePolicyInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PutResourcePolicyInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("resource_policy", &self.resource_policy);
formatter.field("block_public_policy", &self.block_public_policy);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
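/// # Example
///
/// A sketch of requesting the first page of versions, assuming the generated
/// `builder()` constructor on this input type.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// let input =
///     aws_sdk_secretsmanager::input::ListSecretVersionIdsInput::builder()
///         .secret_id("prod/db/credentials")
///         .max_results(10)
///         // Also return versions that have no staging labels attached.
///         .include_deprecated(true)
///         // For later pages, pass the `NextToken` from the prior response
///         // via `.next_token(...)`.
///         .build()?;
/// # Ok(())
/// # }
/// ```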
pub struct ListSecretVersionIdsInput {
/// <p>The ARN or name of the secret whose versions you want to list.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>The number of results to include in the response.</p>
    /// <p>If there are more results available, Secrets Manager includes <code>NextToken</code> in the response. To get the next results, call <code>ListSecretVersionIds</code> again with the value from <code>NextToken</code>. </p>
pub max_results: std::option::Option<i32>,
/// <p>A token that indicates where the output should continue from, if a previous call did not show all results. To get the next results, call <code>ListSecretVersionIds</code> again with this value.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>Specifies whether to include versions of secrets that don't have any staging labels attached to them. Versions without staging labels are considered deprecated and are subject to deletion by Secrets Manager.</p>
pub include_deprecated: std::option::Option<bool>,
}
impl ListSecretVersionIdsInput {
/// <p>The ARN or name of the secret whose versions you want to list.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>The number of results to include in the response.</p>
    /// <p>If there are more results available, Secrets Manager includes <code>NextToken</code> in the response. To get the next results, call <code>ListSecretVersionIds</code> again with the value from <code>NextToken</code>. </p>
pub fn max_results(&self) -> std::option::Option<i32> {
self.max_results
}
/// <p>A token that indicates where the output should continue from, if a previous call did not show all results. To get the next results, call <code>ListSecretVersionIds</code> again with this value.</p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
/// <p>Specifies whether to include versions of secrets that don't have any staging labels attached to them. Versions without staging labels are considered deprecated and are subject to deletion by Secrets Manager.</p>
pub fn include_deprecated(&self) -> std::option::Option<bool> {
self.include_deprecated
}
}
impl std::fmt::Debug for ListSecretVersionIdsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ListSecretVersionIdsInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("max_results", &self.max_results);
formatter.field("next_token", &self.next_token);
formatter.field("include_deprecated", &self.include_deprecated);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
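/// # Example
///
/// A sketch of a filtered listing, assuming the generated `builder()`
/// constructors and the `crate::model::{Filter, FilterNameStringType,
/// SortOrderType}` shapes from this SDK's model module.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_secretsmanager::model::{Filter, FilterNameStringType, SortOrderType};
/// let input = aws_sdk_secretsmanager::input::ListSecretsInput::builder()
///     .max_results(20)
///     // Match secrets whose names start with "prod/".
///     .filters(Filter::builder().key(FilterNameStringType::Name).values("prod/").build())
///     .sort_order(SortOrderType::Asc)
///     .build()?;
/// # Ok(())
/// # }
/// ```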
pub struct ListSecretsInput {
/// <p>The number of results to include in the response.</p>
    /// <p>If there are more results available, Secrets Manager includes <code>NextToken</code> in the response. To get the next results, call <code>ListSecrets</code> again with the value from <code>NextToken</code>.</p>
pub max_results: std::option::Option<i32>,
/// <p>A token that indicates where the output should continue from, if a previous call did not show all results. To get the next results, call <code>ListSecrets</code> again with this value.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The filters to apply to the list of secrets.</p>
pub filters: std::option::Option<std::vec::Vec<crate::model::Filter>>,
/// <p>Lists secrets in the requested order. </p>
pub sort_order: std::option::Option<crate::model::SortOrderType>,
}
impl ListSecretsInput {
/// <p>The number of results to include in the response.</p>
    /// <p>If there are more results available, Secrets Manager includes <code>NextToken</code> in the response. To get the next results, call <code>ListSecrets</code> again with the value from <code>NextToken</code>.</p>
pub fn max_results(&self) -> std::option::Option<i32> {
self.max_results
}
/// <p>A token that indicates where the output should continue from, if a previous call did not show all results. To get the next results, call <code>ListSecrets</code> again with this value.</p>
pub fn next_token(&self) -> std::option::Option<&str> {
self.next_token.as_deref()
}
/// <p>The filters to apply to the list of secrets.</p>
pub fn filters(&self) -> std::option::Option<&[crate::model::Filter]> {
self.filters.as_deref()
}
/// <p>Lists secrets in the requested order. </p>
pub fn sort_order(&self) -> std::option::Option<&crate::model::SortOrderType> {
self.sort_order.as_ref()
}
}
impl std::fmt::Debug for ListSecretsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ListSecretsInput");
formatter.field("max_results", &self.max_results);
formatter.field("next_token", &self.next_token);
formatter.field("filters", &self.filters);
formatter.field("sort_order", &self.sort_order);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
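/// # Example
///
/// A minimal sketch of building this input, assuming the `builder()`
/// constructor that smithy-rs generates (defined elsewhere in this file);
/// the secret name is hypothetical.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// let input = aws_sdk_secretsmanager::input::GetSecretValueInput::builder()
///     // ARN or friendly name; a complete ARN is recommended.
///     .secret_id("prod/db/credentials")
///     // Omitting both `version_id` and `version_stage` returns AWSCURRENT.
///     .version_stage("AWSCURRENT")
///     .build()?;
/// assert_eq!(input.version_stage(), Some("AWSCURRENT"));
/// # Ok(())
/// # }
/// ```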
pub struct GetSecretValueInput {
/// <p>The ARN or name of the secret to retrieve.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
/// <p>The unique identifier of the version of the secret to retrieve. If you include both this parameter and <code>VersionStage</code>, the two parameters must refer to the same secret version. If you don't specify either a <code>VersionStage</code> or <code>VersionId</code>, then Secrets Manager returns the <code>AWSCURRENT</code> version.</p>
/// <p>This value is typically a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value with 32 hexadecimal digits.</p>
pub version_id: std::option::Option<std::string::String>,
/// <p>The staging label of the version of the secret to retrieve. </p>
/// <p>Secrets Manager uses staging labels to keep track of different versions during the rotation process. If you include both this parameter and <code>VersionId</code>, the two parameters must refer to the same secret version. If you don't specify either a <code>VersionStage</code> or <code>VersionId</code>, Secrets Manager returns the <code>AWSCURRENT</code> version.</p>
pub version_stage: std::option::Option<std::string::String>,
}
impl GetSecretValueInput {
/// <p>The ARN or name of the secret to retrieve.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
/// <p>The unique identifier of the version of the secret to retrieve. If you include both this parameter and <code>VersionStage</code>, the two parameters must refer to the same secret version. If you don't specify either a <code>VersionStage</code> or <code>VersionId</code>, then Secrets Manager returns the <code>AWSCURRENT</code> version.</p>
/// <p>This value is typically a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value with 32 hexadecimal digits.</p>
pub fn version_id(&self) -> std::option::Option<&str> {
self.version_id.as_deref()
}
/// <p>The staging label of the version of the secret to retrieve. </p>
/// <p>Secrets Manager uses staging labels to keep track of different versions during the rotation process. If you include both this parameter and <code>VersionId</code>, the two parameters must refer to the same secret version. If you don't specify either a <code>VersionStage</code> or <code>VersionId</code>, Secrets Manager returns the <code>AWSCURRENT</code> version.</p>
pub fn version_stage(&self) -> std::option::Option<&str> {
self.version_stage.as_deref()
}
}
impl std::fmt::Debug for GetSecretValueInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetSecretValueInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("version_id", &self.version_id);
formatter.field("version_stage", &self.version_stage);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetResourcePolicyInput {
/// <p>The ARN or name of the secret to retrieve the attached resource-based policy for.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
}
impl GetResourcePolicyInput {
/// <p>The ARN or name of the secret to retrieve the attached resource-based policy for.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
}
impl std::fmt::Debug for GetResourcePolicyInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetResourcePolicyInput");
formatter.field("secret_id", &self.secret_id);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
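/// # Example
///
/// A sketch of requesting a generated password, assuming the generated
/// `builder()` constructor on this input type.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// let input = aws_sdk_secretsmanager::input::GetRandomPasswordInput::builder()
///     .password_length(24)
///     // Exclude characters that are awkward in shell-quoted contexts.
///     .exclude_characters("'\"\\`")
///     .require_each_included_type(true)
///     .build()?;
/// # Ok(())
/// # }
/// ```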
pub struct GetRandomPasswordInput {
/// <p>The length of the password. If you don't include this parameter, the default length is 32 characters.</p>
pub password_length: std::option::Option<i64>,
/// <p>A string of the characters that you don't want in the password.</p>
pub exclude_characters: std::option::Option<std::string::String>,
/// <p>Specifies whether to exclude numbers from the password. If you don't include this switch, the password can contain numbers.</p>
pub exclude_numbers: std::option::Option<bool>,
    /// <p>Specifies whether to exclude the following punctuation characters from the password: <code>! &quot; # $ % &amp; &#39; ( ) * + , - . / : ; &lt; = &gt; ? @ [ \ ] ^ _ &#96; { | } ~</code>. If you don't include this switch, the password can contain punctuation.</p>
pub exclude_punctuation: std::option::Option<bool>,
/// <p>Specifies whether to exclude uppercase letters from the password. If you don't include this switch, the password can contain uppercase letters.</p>
pub exclude_uppercase: std::option::Option<bool>,
/// <p>Specifies whether to exclude lowercase letters from the password. If you don't include this switch, the password can contain lowercase letters.</p>
pub exclude_lowercase: std::option::Option<bool>,
/// <p>Specifies whether to include the space character. If you include this switch, the password can contain space characters.</p>
pub include_space: std::option::Option<bool>,
    /// <p>Specifies whether to include at least one uppercase letter, one lowercase letter, one number, and one punctuation character. If you don't include this switch, the password contains at least one of every character type by default.</p>
pub require_each_included_type: std::option::Option<bool>,
}
impl GetRandomPasswordInput {
/// <p>The length of the password. If you don't include this parameter, the default length is 32 characters.</p>
pub fn password_length(&self) -> std::option::Option<i64> {
self.password_length
}
/// <p>A string of the characters that you don't want in the password.</p>
pub fn exclude_characters(&self) -> std::option::Option<&str> {
self.exclude_characters.as_deref()
}
/// <p>Specifies whether to exclude numbers from the password. If you don't include this switch, the password can contain numbers.</p>
pub fn exclude_numbers(&self) -> std::option::Option<bool> {
self.exclude_numbers
}
    /// <p>Specifies whether to exclude the following punctuation characters from the password: <code>! &quot; # $ % &amp; &#39; ( ) * + , - . / : ; &lt; = &gt; ? @ [ \ ] ^ _ &#96; { | } ~</code>. If you don't include this switch, the password can contain punctuation.</p>
pub fn exclude_punctuation(&self) -> std::option::Option<bool> {
self.exclude_punctuation
}
/// <p>Specifies whether to exclude uppercase letters from the password. If you don't include this switch, the password can contain uppercase letters.</p>
pub fn exclude_uppercase(&self) -> std::option::Option<bool> {
self.exclude_uppercase
}
/// <p>Specifies whether to exclude lowercase letters from the password. If you don't include this switch, the password can contain lowercase letters.</p>
pub fn exclude_lowercase(&self) -> std::option::Option<bool> {
self.exclude_lowercase
}
/// <p>Specifies whether to include the space character. If you include this switch, the password can contain space characters.</p>
pub fn include_space(&self) -> std::option::Option<bool> {
self.include_space
}
    /// <p>Specifies whether to include at least one uppercase letter, one lowercase letter, one number, and one punctuation character. If you don't include this switch, the password contains at least one of every character type by default.</p>
pub fn require_each_included_type(&self) -> std::option::Option<bool> {
self.require_each_included_type
}
}
impl std::fmt::Debug for GetRandomPasswordInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetRandomPasswordInput");
formatter.field("password_length", &self.password_length);
formatter.field("exclude_characters", &self.exclude_characters);
formatter.field("exclude_numbers", &self.exclude_numbers);
formatter.field("exclude_punctuation", &self.exclude_punctuation);
formatter.field("exclude_uppercase", &self.exclude_uppercase);
formatter.field("exclude_lowercase", &self.exclude_lowercase);
formatter.field("include_space", &self.include_space);
formatter.field(
"require_each_included_type",
&self.require_each_included_type,
);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeSecretInput {
/// <p>The ARN or name of the secret. </p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
}
impl DescribeSecretInput {
/// <p>The ARN or name of the secret. </p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
}
impl std::fmt::Debug for DescribeSecretInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeSecretInput");
formatter.field("secret_id", &self.secret_id);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
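/// # Example
///
/// A sketch of a deletion with a recovery window, assuming the generated
/// `builder()` constructor on this input type.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// let input = aws_sdk_secretsmanager::input::DeleteSecretInput::builder()
///     .secret_id("prod/db/credentials")
///     // Keep a 7-day window so the secret can still be restored;
///     // don't combine this with `force_delete_without_recovery`.
///     .recovery_window_in_days(7)
///     .build()?;
/// # Ok(())
/// # }
/// ```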
pub struct DeleteSecretInput {
/// <p>The ARN or name of the secret to delete.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
    /// <p>The number of days from 7 to 30 that Secrets Manager waits before permanently deleting the secret. You can't use both this parameter and <code>ForceDeleteWithoutRecovery</code> in the same call. If you don't use either, then Secrets Manager defaults to a 30-day recovery window.</p>
pub recovery_window_in_days: std::option::Option<i64>,
    /// <p>Specifies whether to delete the secret without any recovery window. You can't use both this parameter and <code>RecoveryWindowInDays</code> in the same call. If you don't use either, then Secrets Manager defaults to a 30-day recovery window.</p>
    /// <p>Secrets Manager performs the actual deletion with an asynchronous background process, so there might be a short delay before the secret is permanently deleted. If you delete a secret and then immediately create a secret with the same name, use appropriate backoff and retry logic.</p> <important>
    /// <p>Use this parameter with caution. It causes the operation to skip the recovery window that Secrets Manager would normally impose with the <code>RecoveryWindowInDays</code> parameter before permanent deletion. If you delete a secret with the <code>ForceDeleteWithoutRecovery</code> parameter, then you have no opportunity to recover the secret. You lose the secret permanently.</p>
/// </important>
pub force_delete_without_recovery: std::option::Option<bool>,
}
impl DeleteSecretInput {
/// <p>The ARN or name of the secret to delete.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
    /// <p>The number of days from 7 to 30 that Secrets Manager waits before permanently deleting the secret. You can't use both this parameter and <code>ForceDeleteWithoutRecovery</code> in the same call. If you don't use either, then Secrets Manager defaults to a 30-day recovery window.</p>
pub fn recovery_window_in_days(&self) -> std::option::Option<i64> {
self.recovery_window_in_days
}
    /// <p>Specifies whether to delete the secret without any recovery window. You can't use both this parameter and <code>RecoveryWindowInDays</code> in the same call. If you don't use either, then Secrets Manager defaults to a 30-day recovery window.</p>
    /// <p>Secrets Manager performs the actual deletion with an asynchronous background process, so there might be a short delay before the secret is permanently deleted. If you delete a secret and then immediately create a secret with the same name, use appropriate backoff and retry logic.</p> <important>
    /// <p>Use this parameter with caution. It causes the operation to skip the recovery window that Secrets Manager would normally impose with the <code>RecoveryWindowInDays</code> parameter before permanent deletion. If you delete a secret with the <code>ForceDeleteWithoutRecovery</code> parameter, then you have no opportunity to recover the secret. You lose the secret permanently.</p>
/// </important>
pub fn force_delete_without_recovery(&self) -> std::option::Option<bool> {
self.force_delete_without_recovery
}
}
impl std::fmt::Debug for DeleteSecretInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteSecretInput");
formatter.field("secret_id", &self.secret_id);
formatter.field("recovery_window_in_days", &self.recovery_window_in_days);
formatter.field(
"force_delete_without_recovery",
&self.force_delete_without_recovery,
);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteResourcePolicyInput {
/// <p>The ARN or name of the secret to delete the attached resource-based policy for.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
}
impl DeleteResourcePolicyInput {
/// <p>The ARN or name of the secret to delete the attached resource-based policy for.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
}
impl std::fmt::Debug for DeleteResourcePolicyInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteResourcePolicyInput");
formatter.field("secret_id", &self.secret_id);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
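/// # Example
///
/// A sketch of creating a secret with a string value and a tag, assuming the
/// generated `builder()` constructors on this input type and on
/// `crate::model::Tag`; all names and values are placeholders.
///
/// ```no_run
/// # fn build() -> Result<(), Box<dyn std::error::Error>> {
/// use aws_sdk_secretsmanager::model::Tag;
/// let input = aws_sdk_secretsmanager::input::CreateSecretInput::builder()
///     .name("prod/db/credentials")
///     .description("Database credentials for the prod cluster")
///     // Either `secret_string` or `secret_binary`, but not both.
///     .secret_string(r#"{"username":"admin","password":"EXAMPLE-PASSWORD"}"#)
///     .tags(Tag::builder().key("environment").value("production").build())
///     .build()?;
/// # Ok(())
/// # }
/// ```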
pub struct CreateSecretInput {
/// <p>The name of the new secret.</p>
/// <p>The secret name can contain ASCII letters, numbers, and the following characters: /_+=.@-</p>
/// <p>Do not end your secret name with a hyphen followed by six characters. If you do so, you risk confusion and unexpected results when searching for a secret by partial ARN. Secrets Manager automatically adds a hyphen and six random characters after the secret name at the end of the ARN.</p>
pub name: std::option::Option<std::string::String>,
/// <p>If you include <code>SecretString</code> or <code>SecretBinary</code>, then Secrets Manager creates an initial version for the secret, and this parameter specifies the unique identifier for the new version. </p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for the new version and include the value in the request.</p>
/// </note>
/// <p>This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness of your versions within the specified secret. </p>
/// <ul>
    /// <li> <p>If the <code>ClientRequestToken</code> value isn't already associated with a version of the secret, then a new version of the secret is created. </p> </li>
    /// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> and <code>SecretBinary</code> values are the same as those in the request, then the request is ignored.</p> </li>
/// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> and <code>SecretBinary</code> values are different from those in the request, then the request fails because you cannot modify an existing version. Instead, use <code>PutSecretValue</code> to create a new version.</p> </li>
/// </ul>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub client_request_token: std::option::Option<std::string::String>,
/// <p>The description of the secret.</p>
pub description: std::option::Option<std::string::String>,
/// <p>The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt the secret value in the secret.</p>
/// <p>To use a KMS key in a different account, use the key ARN or the alias ARN.</p>
/// <p>If you don't specify this value, then Secrets Manager uses the key <code>aws/secretsmanager</code>. If that key doesn't yet exist, then Secrets Manager creates it for you automatically the first time it encrypts the secret value.</p>
/// <p>If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use <code>aws/secretsmanager</code> to encrypt the secret, and you must create and use a customer managed KMS key. </p>
pub kms_key_id: std::option::Option<std::string::String>,
/// <p>The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.</p>
/// <p>Either <code>SecretString</code> or <code>SecretBinary</code> must have a value, but not both.</p>
/// <p>This parameter is not available in the Secrets Manager console.</p>
pub secret_binary: std::option::Option<aws_smithy_types::Blob>,
/// <p>The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value.</p>
/// <p>Either <code>SecretString</code> or <code>SecretBinary</code> must have a value, but not both.</p>
/// <p>If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the <code>SecretString</code> parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that a Lambda rotation function can parse.</p>
pub secret_string: std::option::Option<std::string::String>,
/// <p>A list of tags to attach to the secret. Each tag is a key and value pair of strings in a JSON text string, for example:</p>
/// <p> <code>[{"Key":"CostCenter","Value":"12345"},{"Key":"environment","Value":"production"}]</code> </p>
/// <p>Secrets Manager tag key names are case sensitive. A tag with the key "ABC" is a different tag from one with key "abc".</p>
/// <p>If you check tags in permissions policies as part of your security strategy, then adding or removing a tag can change permissions. If the completion of this operation would result in you losing your permissions for this secret, then Secrets Manager blocks the operation and returns an <code>Access Denied</code> error. For more information, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#tag-secrets-abac">Control access to secrets using tags</a> and <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#auth-and-access_tags2">Limit access to identities with tags that match secrets' tags</a>.</p>
/// <p>For information about how to format a JSON parameter for the various command line tool environments, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json">Using JSON for Parameters</a>. If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.</p>
/// <p>The following restrictions apply to tags:</p>
/// <ul>
/// <li> <p>Maximum number of tags per secret: 50</p> </li>
/// <li> <p>Maximum key length: 127 Unicode characters in UTF-8</p> </li>
/// <li> <p>Maximum value length: 255 Unicode characters in UTF-8</p> </li>
/// <li> <p>Tag keys and values are case sensitive.</p> </li>
/// <li> <p>Do not use the <code>aws:</code> prefix in your tag names or values because Amazon Web Services reserves it for Amazon Web Services use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.</p> </li>
/// <li> <p>If you use your tagging schema across multiple services and resources, other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.</p> </li>
/// </ul>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
/// <p>A list of Regions and KMS keys to replicate secrets.</p>
pub add_replica_regions: std::option::Option<std::vec::Vec<crate::model::ReplicaRegionType>>,
/// <p>Specifies whether to overwrite a secret with the same name in the destination Region.</p>
pub force_overwrite_replica_secret: bool,
}
impl CreateSecretInput {
/// <p>The name of the new secret.</p>
/// <p>The secret name can contain ASCII letters, numbers, and the following characters: /_+=.@-</p>
/// <p>Do not end your secret name with a hyphen followed by six characters. If you do so, you risk confusion and unexpected results when searching for a secret by partial ARN. Secrets Manager automatically adds a hyphen and six random characters after the secret name at the end of the ARN.</p>
pub fn name(&self) -> std::option::Option<&str> {
self.name.as_deref()
}
/// <p>If you include <code>SecretString</code> or <code>SecretBinary</code>, then Secrets Manager creates an initial version for the secret, and this parameter specifies the unique identifier for the new version. </p> <note>
/// <p>If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a <code>ClientRequestToken</code> yourself for the new version and include the value in the request.</p>
/// </note>
/// <p>This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a <a href="https://wikipedia.org/wiki/Universally_unique_identifier">UUID-type</a> value to ensure uniqueness of your versions within the specified secret. </p>
/// <ul>
    /// <li> <p>If the <code>ClientRequestToken</code> value isn't already associated with a version of the secret, then a new version of the secret is created. </p> </li>
    /// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> and <code>SecretBinary</code> values are the same as those in the request, then the request is ignored.</p> </li>
/// <li> <p>If a version with this value already exists and that version's <code>SecretString</code> and <code>SecretBinary</code> values are different from those in the request, then the request fails because you cannot modify an existing version. Instead, use <code>PutSecretValue</code> to create a new version.</p> </li>
/// </ul>
/// <p>This value becomes the <code>VersionId</code> of the new version.</p>
pub fn client_request_token(&self) -> std::option::Option<&str> {
self.client_request_token.as_deref()
}
/// <p>The description of the secret.</p>
pub fn description(&self) -> std::option::Option<&str> {
self.description.as_deref()
}
/// <p>The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt the secret value in the secret.</p>
/// <p>To use a KMS key in a different account, use the key ARN or the alias ARN.</p>
/// <p>If you don't specify this value, then Secrets Manager uses the key <code>aws/secretsmanager</code>. If that key doesn't yet exist, then Secrets Manager creates it for you automatically the first time it encrypts the secret value.</p>
/// <p>If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use <code>aws/secretsmanager</code> to encrypt the secret, and you must create and use a customer managed KMS key. </p>
pub fn kms_key_id(&self) -> std::option::Option<&str> {
self.kms_key_id.as_deref()
}
/// <p>The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.</p>
/// <p>Either <code>SecretString</code> or <code>SecretBinary</code> must have a value, but not both.</p>
/// <p>This parameter is not available in the Secrets Manager console.</p>
pub fn secret_binary(&self) -> std::option::Option<&aws_smithy_types::Blob> {
self.secret_binary.as_ref()
}
/// <p>The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value.</p>
/// <p>Either <code>SecretString</code> or <code>SecretBinary</code> must have a value, but not both.</p>
/// <p>If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the <code>SecretString</code> parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that a Lambda rotation function can parse.</p>
pub fn secret_string(&self) -> std::option::Option<&str> {
self.secret_string.as_deref()
}
/// <p>A list of tags to attach to the secret. Each tag is a key and value pair of strings in a JSON text string, for example:</p>
/// <p> <code>[{"Key":"CostCenter","Value":"12345"},{"Key":"environment","Value":"production"}]</code> </p>
/// <p>Secrets Manager tag key names are case sensitive. A tag with the key "ABC" is a different tag from one with key "abc".</p>
/// <p>If you check tags in permissions policies as part of your security strategy, then adding or removing a tag can change permissions. If the completion of this operation would result in you losing your permissions for this secret, then Secrets Manager blocks the operation and returns an <code>Access Denied</code> error. For more information, see <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#tag-secrets-abac">Control access to secrets using tags</a> and <a href="https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_examples.html#auth-and-access_tags2">Limit access to identities with tags that match secrets' tags</a>.</p>
/// <p>For information about how to format a JSON parameter for the various command line tool environments, see <a href="https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json">Using JSON for Parameters</a>. If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.</p>
/// <p>The following restrictions apply to tags:</p>
/// <ul>
/// <li> <p>Maximum number of tags per secret: 50</p> </li>
/// <li> <p>Maximum key length: 127 Unicode characters in UTF-8</p> </li>
/// <li> <p>Maximum value length: 255 Unicode characters in UTF-8</p> </li>
/// <li> <p>Tag keys and values are case sensitive.</p> </li>
/// <li> <p>Do not use the <code>aws:</code> prefix in your tag names or values because Amazon Web Services reserves it for Amazon Web Services use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.</p> </li>
/// <li> <p>If you use your tagging schema across multiple services and resources, other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.</p> </li>
/// </ul>
pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
self.tags.as_deref()
}
/// <p>A list of Regions and KMS keys to replicate secrets.</p>
pub fn add_replica_regions(&self) -> std::option::Option<&[crate::model::ReplicaRegionType]> {
self.add_replica_regions.as_deref()
}
/// <p>Specifies whether to overwrite a secret with the same name in the destination Region.</p>
pub fn force_overwrite_replica_secret(&self) -> bool {
self.force_overwrite_replica_secret
}
}
impl std::fmt::Debug for CreateSecretInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CreateSecretInput");
formatter.field("name", &self.name);
formatter.field("client_request_token", &self.client_request_token);
formatter.field("description", &self.description);
formatter.field("kms_key_id", &self.kms_key_id);
formatter.field("secret_binary", &"*** Sensitive Data Redacted ***");
formatter.field("secret_string", &"*** Sensitive Data Redacted ***");
formatter.field("tags", &self.tags);
formatter.field("add_replica_regions", &self.add_replica_regions);
formatter.field(
"force_overwrite_replica_secret",
&self.force_overwrite_replica_secret,
);
formatter.finish()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CancelRotateSecretInput {
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub secret_id: std::option::Option<std::string::String>,
}
impl CancelRotateSecretInput {
/// <p>The ARN or name of the secret.</p>
/// <p>For an ARN, we recommend that you specify a complete ARN rather than a partial ARN.</p>
pub fn secret_id(&self) -> std::option::Option<&str> {
self.secret_id.as_deref()
}
}
impl std::fmt::Debug for CancelRotateSecretInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("CancelRotateSecretInput");
formatter.field("secret_id", &self.secret_id);
formatter.finish()
}
}
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-emscripten no threads support
#![allow(unknown_features)]
#![feature(box_syntax)]
use std::thread;
pub fn main() { test05(); }
fn test05_start<F:FnOnce(isize)>(f: F) {
f(22);
}
fn test05() {
let three: Box<_> = box 3;
let fn_to_send = move|n:isize| {
println!("{}", *three + n); // will copy x into the closure
assert_eq!(*three, 3);
};
thread::spawn(move|| {
test05_start(fn_to_send);
}).join().ok().unwrap();
}
90bd96c84df04e0c054e6f257ebc7c8bd88e200b | 395 | pub enum DataColType {
SignedInt,
UnsignedInt,
Float,
Timestamp,
Bool,
VarChar
}
/// Schema description of a single column: its name, nullability, and type.
pub struct DataColumn {
pub name: String,
pub nullable: bool,
pub col_type: DataColType,
}
impl DataColumn {
pub fn col_type(&self) -> &DataColType {
&self.col_type
}
pub fn nullable(&self) -> bool {
self.nullable
}
}
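
// Illustrative sketch: constructing a nullable timestamp column. The column
// name `created_at` is hypothetical and not part of the original module.
#[allow(dead_code)]
fn example_created_at_column() -> DataColumn {
    DataColumn {
        name: String::from("created_at"),
        nullable: true,
        col_type: DataColType::Timestamp,
    }
}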
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
use bencher::Bencher;
use deno_core::v8;
use deno_core::JsRuntime;
use crate::profiling::is_profiling;
pub fn create_js_runtime(setup: impl FnOnce(&mut JsRuntime)) -> JsRuntime {
let mut rt = JsRuntime::new(Default::default());
// Setup bootstrap namespace
rt.execute("bootstrap", "globalThis.__bootstrap = {};")
.unwrap();
// Caller provided setup
setup(&mut rt);
// Init ops
rt.sync_ops_cache();
rt
}
fn loop_code(iters: u64, src: &str) -> String {
format!(r#"for(let i=0; i < {}; i++) {{ {} }}"#, iters, src,)
}
pub fn bench_js_sync(
b: &mut Bencher,
src: &str,
setup: impl FnOnce(&mut JsRuntime),
) {
let mut runtime = create_js_runtime(setup);
let context = runtime.global_context();
let scope = &mut v8::HandleScope::with_context(runtime.v8_isolate(), context);
// Increase JS iterations if profiling for nicer flamegraphs
let inner_iters = 1000 * if is_profiling() { 10000 } else { 1 };
// Looped code
let looped_src = loop_code(inner_iters, src);
let code = v8::String::new(scope, looped_src.as_ref()).unwrap();
let script = v8::Script::compile(scope, code, None).unwrap();
// Run once if profiling, otherwise regular bench loop
if is_profiling() {
script.run(scope).unwrap();
} else {
b.iter(|| {
script.run(scope).unwrap();
});
}
}
pub fn bench_js_async(
b: &mut Bencher,
src: &str,
setup: impl FnOnce(&mut JsRuntime),
) {
let mut runtime = create_js_runtime(setup);
let tokio_runtime = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
// Looped code
let looped = loop_code(1000, src);
let src = looped.as_ref();
if is_profiling() {
for _ in 0..10000 {
runtime.execute("inner_loop", src).unwrap();
let future = runtime.run_event_loop();
tokio_runtime.block_on(future).unwrap();
}
} else {
b.iter(|| {
runtime.execute("inner_loop", src).unwrap();
let future = runtime.run_event_loop();
tokio_runtime.block_on(future).unwrap();
});
}
}
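
// Illustrative sketch of how these helpers are meant to be called from a
// `bencher` benchmark. The benchmark name and JS snippet are hypothetical;
// real call sites register concrete ops on the runtime inside `setup`.
#[allow(dead_code)]
fn bench_js_arithmetic(b: &mut Bencher) {
    bench_js_sync(b, "1 + 1;", |_runtime| {
        // No extra ops or globals are needed for plain JS arithmetic.
    });
}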
//! Write a function that, given a directory, recursively finds all files with a given file
//! extension in that directory and all sub-directories, and counts the number of lines
//! in the file and prints it to stdout.
use std::fs::File;
use std::io::{self, BufRead, Result as IoResult};
use std::{env, path::{Path, PathBuf}};
/// Given an absolute path to a filename, read the file and count the number of lines
fn read_lines(file_path: &Path) -> IoResult<usize> {
let file = File::open(file_path)?;
Ok(io::BufReader::new(file).lines().count())
}
/// Recursively finds all files with a given file extension in a given directory
fn traverse_dir(dir: &Path, ext: &str) -> Vec<PathBuf> {
let mut paths = vec![];
for entry in dir.read_dir().unwrap() {
let entry = entry.unwrap();
let path = entry.path();
if path.is_dir() {
paths.extend(traverse_dir(&path, ext));
} else {
match path.extension() {
Some(extension) => {
if extension == ext {
paths.push(path);
}
}
None => {}
}
}
}
paths
}
/// Main entry point of the program
pub fn main() {
let args: Vec<String> = env::args().collect();
match args.len() {
2 => {
let file_path = Path::new(&args[1]);
if file_path.is_dir() {
println!("Not a file: {:?}", file_path);
return;
}
match read_lines(file_path) {
Ok(lines) => println!("File {:?} {}", file_path.file_name().unwrap(), lines),
Err(e) => println!("Error: {}", e),
}
},
3 => {
// let dir_path = pathdiff::diff_paths(Path::new(&args[1]), cur_dir).unwrap();
let dir_path = Path::new(&args[1]);
println!("Looking for files with extension `{}` in directory `{}`", args[2], dir_path.display());
            if !dir_path.is_dir() {
                match dir_path.extension() {
                    Some(ext) => {
                        if ext.to_str().unwrap() == args[2] {
                            let count = read_lines(dir_path);
                            println!("Total number of lines: {}", count.unwrap());
                        } else {
                            println!("Not a valid extension");
                        }
                    }
                    None => {
                        println!("No file with extension {} found", args[2]);
                    }
                }
                return;
            }
let extension = &args[2];
let mut count = 0;
for path in traverse_dir(&dir_path, extension) {
let len = read_lines(path.as_path()).unwrap();
count += len;
println!("Reading file: {:?} {}", path.file_name().unwrap(), len);
}
println!("Total number of lines: {}", count);
},
_ => println!("Usage: \ncargo run <absolute_dir_path> <file_extension> \n OR \ncargo run <absolute_file_path> \n")
}
}
// Draw some sample text to the screen
extern crate quicksilver;
use quicksilver::{
Future, Result,
combinators::result,
geom::{Shape, Vector},
graphics::{Background::Img, Color, Font, FontStyle, Image},
lifecycle::{Asset, Settings, State, Window, run},
};
struct SampleText {
asset: Asset<Image>,
multiline: Asset<Image>,
}
impl State for SampleText {
fn new() -> Result<SampleText> {
let asset = Asset::new(Font::load("font.ttf")
.and_then(|font| {
let style = FontStyle::new(72.0, Color::BLACK);
result(font.render("Sample Text", &style))
}));
let multiline = Asset::new(Font::load("font.ttf")
.and_then(|font| {
let style = FontStyle::new(48.0, Color::BLACK);
result(font.render("First line\nSecond line\nThird line", &style))
}));
Ok(SampleText { asset, multiline })
}
fn draw(&mut self, window: &mut Window) -> Result<()> {
window.clear(Color::WHITE)?;
self.asset.execute(|image| {
window.draw(&image.area().with_center((400, 300)), Img(&image));
Ok(())
})?;
self.multiline.execute(|image| {
window.draw(&image.area(), Img(&image));
Ok(())
})
}
}
fn main() {
run::<SampleText>("Font Example", Vector::new(800, 600), Settings::default());
}
//! An immutable linked list implementation.
use crate::prelude::*;
// ============
// === List ===
// ============
/// Immutable linked list containing values of type [`T`]. As every node of the list is kept in
/// [`Rc`], cloning of any subsection of this list is very fast.
#[derive(Derivative, Deref)]
#[derivative(Clone(bound = ""))]
#[derivative(Default(bound = ""))]
pub struct List<T> {
#[allow(missing_docs)]
pub data: Option<NonEmpty<T>>,
}
/// Non-empty list. It is guaranteed to have at least one element. See [`List`] to learn more.
#[derive(Derivative, Deref, Debug)]
#[derivative(Clone(bound = ""))]
pub struct NonEmpty<T> {
#[allow(missing_docs)]
pub node: Rc<Node<T>>,
}
/// A node of the [`List`]. Contains the current value and a link to the list's [`tail`].
#[derive(Clone, Debug)]
#[allow(missing_docs)]
pub struct Node<T> {
pub head: T,
pub tail: List<T>,
}
impl<T> Node<T> {
/// Constructor.
pub fn singleton(head: T) -> Self {
let tail = default();
Self { head, tail }
}
}
impl<T> NonEmpty<T> {
/// Constructor.
pub fn singleton(head: T) -> Self {
let node = Rc::new(Node::singleton(head));
Self { node }
}
/// Convert this non-empty list to list of unknown length.
pub fn into_list(self) -> List<T> {
let data = Some(self);
List { data }
}
/// Prepend the element to this list.
pub fn prepend(self, head: T) -> Self {
self.into_list().prepend(head)
}
/// Get the head element of this list.
pub fn head(&self) -> &T {
&self.head
}
/// Get tail of this list.
pub fn tail(&self) -> &List<T> {
&self.tail
}
/// Get the last element of this list.
pub fn last(&self) -> &T {
self.tail.last().unwrap_or_else(|| self.head())
}
/// Check whether this list is empty.
pub fn is_empty(&self) -> bool {
false
}
/// Convert this list to a vector.
fn to_vec(&self) -> Vec<&T> {
let mut out = vec![&self.head];
let mut list = self.tail();
loop {
match list.head() {
None => break,
Some(head) => {
out.push(head);
match list.tail() {
None => break,
Some(tail) => list = tail,
}
}
}
}
out
}
}
impl<T> List<T> {
/// Prepend the element to the list.
pub fn prepend(self, head: T) -> NonEmpty<T> {
let tail = self;
let node = Rc::new(Node { head, tail });
NonEmpty { node }
}
/// Get the head element.
pub fn head(&self) -> Option<&T> {
self.as_ref().map(|t| t.head())
}
/// Get the tail of this list.
pub fn tail(&self) -> Option<&List<T>> {
self.as_ref().map(|t| t.tail())
}
/// Get the last element of this list.
pub fn last(&self) -> Option<&T> {
self.data.as_ref().map(|t| t.last())
}
/// Check whether this list is empty.
pub fn is_empty(&self) -> bool {
self.is_none()
}
/// Convert this list to a vector.
fn to_vec(&self) -> Vec<&T> {
self.data.as_ref().map(|t| t.to_vec()).unwrap_or_default()
}
    /// View this list as a non-empty list. The referenced option is [`None`] if the list is empty.
pub fn as_non_empty(&self) -> &Option<NonEmpty<T>> {
&self.data
}
/// Convert this list to a non-empty list. Return [`None`] if the list is empty.
pub fn into_non_empty(self) -> Option<NonEmpty<T>> {
self.data
}
}
impl<T> From<NonEmpty<T>> for List<T> {
fn from(list: NonEmpty<T>) -> Self {
list.into_list()
}
}
impl<T: Debug> Debug for List<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&self.to_vec(), f)
}
}
impl<'a, T> IntoIterator for &'a List<T> {
type Item = &'a T;
type IntoIter = std::vec::IntoIter<&'a T>;
fn into_iter(self) -> Self::IntoIter {
self.to_vec().into_iter()
}
}
impl<'a, T> IntoIterator for &'a NonEmpty<T> {
type Item = &'a T;
type IntoIter = std::vec::IntoIter<&'a T>;
fn into_iter(self) -> Self::IntoIter {
self.to_vec().into_iter()
}
}
impl<T> FromIterator<T> for List<T> {
// Clippy reports false warning here as we cannot add a bound to `I` that it needs to be a
// double-ended iterator.
#[allow(clippy::needless_collect)]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let vec: Vec<T> = iter.into_iter().collect();
let mut list = List::default();
for item in vec.into_iter().rev() {
list = list.prepend(item).into()
}
list
}
}
impl<T> From<Vec<T>> for List<T> {
fn from(v: Vec<T>) -> Self {
let mut out = List::default();
for item in v.into_iter().rev() {
out = out.prepend(item).into_list();
}
out
}
}
impl<T> TryFrom<Vec<T>> for NonEmpty<T> {
type Error = failure::Error;
fn try_from(v: Vec<T>) -> Result<Self, Self::Error> {
let err = "Cannot convert empty Vec to NonEmpty one.";
List::<T>::from(v).into_non_empty().ok_or_else(|| failure::err_msg(err))
}
}
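
// A small usage sketch exercising only the API defined above; the values and
// test name are illustrative.
#[cfg(test)]
mod usage_tests {
    use super::*;

    #[test]
    fn prepend_and_query() {
        // Build [1, 2, 3] by prepending onto an empty list.
        let list = List::<i32>::default().prepend(3).prepend(2).prepend(1).into_list();
        assert_eq!(list.head(), Some(&1));
        assert_eq!(list.last(), Some(&3));
        // Cloning is cheap: both handles share the same `Rc` nodes.
        let shared = list.clone();
        let items: Vec<&i32> = (&shared).into_iter().collect();
        assert_eq!(items, vec![&1, &2, &3]);
    }
}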
pub mod list_notifications;
pub mod notifications;
/*
Copyright 2021 Integritee AG and Supercomputing Systems AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//! Authoring RPC module client errors.
#[cfg(all(not(feature = "std"), feature = "sgx"))]
use crate::sgx_reexport_prelude::*;
use derive_more::{Display, From};
use jsonrpc_core as rpc_core;
use std::{boxed::Box, format};
/// Author RPC Result type.
pub type Result<T> = core::result::Result<T, Error>;
/// Author RPC errors.
#[derive(Debug, Display, From)]
pub enum Error {
/// Client error.
#[display(fmt = "Client error: {}", _0)]
#[from(ignore)]
Client(Box<dyn std::error::Error + Send>),
/// TrustedOperation pool error,
#[display(fmt = "TrustedOperation pool error: {}", _0)]
Pool(its_top_pool::error::Error),
/// Verification error
#[display(fmt = "Extrinsic verification error")]
#[from(ignore)]
Verification,
/// Incorrect extrinsic format.
#[display(fmt = "Invalid trusted call format")]
BadFormat,
    /// Incorrect enciphered trusted call format.
#[display(fmt = "Invalid enciphered trusted call format")]
BadFormatDecipher,
/// Incorrect seed phrase.
#[display(fmt = "Invalid seed phrase/SURI")]
BadSeedPhrase,
/// Key type ID has an unknown format.
#[display(fmt = "Invalid key type ID format (should be of length four)")]
BadKeyType,
/// Key type ID has some unsupported crypto.
#[display(fmt = "The crypto of key type ID is unknown")]
UnsupportedKeyType,
/// Some random issue with the key store. Shouldn't happen.
#[display(fmt = "The key store is unavailable")]
KeyStoreUnavailable,
/// Invalid session keys encoding.
#[display(fmt = "Session keys are not encoded correctly")]
InvalidSessionKeys,
/// Shard does not exist.
#[display(fmt = "Shard does not exist")]
InvalidShard,
/// Unsupported trusted operation (in case we allow only certain types of operations, using filters)
#[display(fmt = "Unsupported operation type")]
UnsupportedOperation,
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Error::Client(ref err) => Some(&**err),
//Error::Pool(ref err) => Some(err),
//Error::Verification(ref err) => Some(&**err),
_ => None,
}
}
}
/// Base code for all authorship errors.
const BASE_ERROR: i64 = 1000;
/// Extrinsic has an invalid format.
const BAD_FORMAT: i64 = BASE_ERROR + 1;
/// Error during operation verification in runtime.
const VERIFICATION_ERROR: i64 = BASE_ERROR + 2;
/// Pool rejected the operation as invalid
const POOL_INVALID_TX: i64 = BASE_ERROR + 10;
/// Cannot determine operation validity.
const POOL_UNKNOWN_VALIDITY: i64 = POOL_INVALID_TX + 1;
/// The operation is temporarily banned.
const POOL_TEMPORARILY_BANNED: i64 = POOL_INVALID_TX + 2;
/// The operation is already in the pool
const POOL_ALREADY_IMPORTED: i64 = POOL_INVALID_TX + 3;
/// TrustedOperation has too low priority to replace existing one in the pool.
const POOL_TOO_LOW_PRIORITY: i64 = POOL_INVALID_TX + 4;
/// Including this operation would cause a dependency cycle.
const POOL_CYCLE_DETECTED: i64 = POOL_INVALID_TX + 5;
/// The operation was not included to the pool because of the limits.
const POOL_IMMEDIATELY_DROPPED: i64 = POOL_INVALID_TX + 6;
/// The key type crypto is not known.
const UNSUPPORTED_KEY_TYPE: i64 = POOL_INVALID_TX + 7;
impl From<Error> for rpc_core::Error {
fn from(e: Error) -> Self {
use its_top_pool::error::Error as PoolError;
match e {
Error::BadFormat => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(BAD_FORMAT),
message: "Trusted operation has invalid format".into(),
data: None,
},
Error::BadFormatDecipher => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(BAD_FORMAT),
message: "Trusted operation could not be deciphered".into(),
data: None,
},
Error::Verification => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(VERIFICATION_ERROR),
message: "Verification Error".into(),
data: Some(format!("{:?}", e).into()),
},
Error::InvalidShard => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(VERIFICATION_ERROR),
message: "Shard does not exist".into(),
data: Some(format!("{:?}", e).into()),
},
Error::Pool(PoolError::InvalidTrustedOperation) => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(POOL_INVALID_TX),
message: "Invalid Trusted Operation".into(),
data: None,
},
Error::Pool(PoolError::UnknownTrustedOperation) => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(POOL_UNKNOWN_VALIDITY),
message: "Unknown Trusted Operation Validity".into(),
data: None,
},
Error::Pool(PoolError::TemporarilyBanned) => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(POOL_TEMPORARILY_BANNED),
message: "Trusted Operation is temporarily banned".into(),
data: None,
},
Error::Pool(PoolError::AlreadyImported) => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(POOL_ALREADY_IMPORTED),
message: "Trusted Operation Already Imported".into(),
data: None,
},
Error::Pool(PoolError::TooLowPriority(new)) => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(POOL_TOO_LOW_PRIORITY),
message: format!("Priority is too low: {}", new),
data: Some("The Trusted Operation has too low priority to replace another Trusted Operation already in the pool.".into()),
},
Error::Pool(PoolError::CycleDetected) => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(POOL_CYCLE_DETECTED),
message: "Cycle Detected".into(),
data: None,
},
Error::Pool(PoolError::ImmediatelyDropped) => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(POOL_IMMEDIATELY_DROPPED),
message: "Immediately Dropped".into(),
data: Some("The Trusted Operation couldn't enter the pool because of the limit".into()),
},
Error::UnsupportedKeyType => rpc_core::Error {
code: rpc_core::ErrorCode::ServerError(UNSUPPORTED_KEY_TYPE),
message: "Unknown key type crypto" .into(),
data: Some(
"The crypto for the given key type is unknown, please add the public key to the \
request to insert the key successfully.".into()
),
},
e => rpc_core::Error {
code: rpc_core::ErrorCode::InternalError,
message: "Unknown error occurred".into(),
data: Some(format!("{:?}", e).into()),
},
}
}
}
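
// Illustrative check (not part of the original file): a `BadFormat` error
// converts to the JSON-RPC server error code documented above.
#[cfg(test)]
mod error_code_tests {
    use super::*;

    #[test]
    fn bad_format_maps_to_its_server_code() {
        let rpc: rpc_core::Error = Error::BadFormat.into();
        assert_eq!(rpc.code, rpc_core::ErrorCode::ServerError(BAD_FORMAT));
    }
}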
use pyo3::exceptions::{PyBlockingIOError, PyRuntimeError};
use pyo3::prelude::*;
use pyo3::types::PyBytes;
use bytes::BytesMut;
use crossbeam::channel::{bounded, Receiver, Sender, TryRecvError, TrySendError};
use crossbeam::queue::SegQueue;
use std::sync::Arc;
use super::{ReceiverPayload, WakerQueue};
/// The callable class that handling communication back to the server protocol.
#[pyclass]
pub struct DataReceiver {
/// The receiver half for receiving the client body chunks.
rx: Receiver<ReceiverPayload>,
/// A queue of waiting events to invoke before the body
/// can be read from the receiver again.
waiter_queue: WakerQueue,
}
impl DataReceiver {
/// Create a new handler with the given sender.
pub fn new(rx: Receiver<ReceiverPayload>, waiter_queue: WakerQueue) -> Self {
Self { rx, waiter_queue }
}
}
#[pymethods]
impl DataReceiver {
/// Receives a chunk of data from the socket without blocking.
///
    /// Invoked from Python with no arguments. The returned tuple carries
    /// `more_body`, which signals whether any more body chunks are expected,
    /// together with the body chunk itself.
///
/// Returns:
/// A tuple containing a boolean and a set of bytes, the boolean signals
/// if there is more data to be read from the socket or not and the
/// bytes returned are what contain the actual data.
///
/// Raises:
/// RuntimeError:
/// If the channel the receiver uses to communicate with the main
/// socket handler is closed.
///
/// BlockingIOError:
/// The receiver is empty and would block waiting for data to be
/// sent to the receiver. In the event that this error is raised
/// the handler should set a waker in order to be notified when
/// data is available.
#[call]
fn __call__(&self) -> PyResult<(bool, Py<PyBytes>)> {
let resp = self.rx.try_recv();
return match resp {
Ok(values) => Ok(values),
Err(TryRecvError::Disconnected) => Err(PyRuntimeError::new_err(
"receiving channel was unexpectedly closed.",
)),
Err(TryRecvError::Empty) => Err(PyBlockingIOError::new_err(())),
};
}
/// Submits a given callback to the waiter queue.
///
    /// When the socket becomes readable again, any waiters in the queue are
    /// taken out of the queue and invoked, signalling that the socket can be
    /// read from once more.
///
/// All waker callbacks are invoked with no parameters or key word
/// arguments and are expected not to directly raise an error, in the case
/// that a waker does raise an error the exception is ignored and
/// implicitly silenced.
///
/// Args:
/// waker:
/// A callback to be invoked when data can be read from the socket
/// without blocking.
fn subscribe(&self, waker: PyObject) {
self.waiter_queue.push(waker);
}
}
/// A factory / manager for receiver handles sending data from the server
/// handler to the Python callbacks.
pub struct ReceiverFactory {
/// The sender half for sending the client body chunks.
receiver_tx: Sender<ReceiverPayload>,
/// The receiver half for receiving the client body chunks.
receiver_rx: Receiver<ReceiverPayload>,
/// A queue of waiting events to invoke before the body
/// can be read from the receiver again.
waiter_queue: WakerQueue,
}
impl ReceiverFactory {
/// Constructs a new factory.
pub fn new() -> Self {
let (tx, rx) = bounded(2);
let queue = Arc::new(SegQueue::new());
Self {
receiver_tx: tx,
receiver_rx: rx,
waiter_queue: queue,
}
}
/// Makes a new sending handle with the given factory channels and queue.
pub fn make_handle(&self) -> DataReceiver {
DataReceiver::new(self.receiver_rx.clone(), self.waiter_queue.clone())
}
/// Sends the given payload to the handler channel.
///
    /// If any waiters are queued for a chunk of data from the handler, this
    /// implicitly wakes one of them: a single waiter is popped from the queue
    /// and handed the chunk directly, rather than every waiter being woken.
pub fn send(&self, data: (bool, BytesMut)) -> Result<(), TrySendError<ReceiverPayload>> {
Python::with_gil(|py| {
let bytes_body = unsafe { PyBytes::from_ptr(py, data.1.as_ptr(), data.1.len()) };
let body = Py::from(bytes_body);
if self.waiter_queue.len() > 0 {
if let Some(waker) = self.waiter_queue.pop() {
// The waker should not affect the writer
let _ = waker.call1(py, (data.0, body));
}
Ok(())
} else {
self.receiver_tx.try_send((data.0, body))
}
})
}
}
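
// A hedged usage sketch (not part of the original module): a server task
// hands the final chunk to the factory. If the bounded channel is full and no
// waiter is queued, the payload is returned so the caller can retry later.
#[allow(dead_code)]
fn send_final_chunk(factory: &ReceiverFactory) {
    let mut chunk = BytesMut::with_capacity(5);
    chunk.extend_from_slice(b"hello");
    // `false` signals that no more body is expected after this chunk.
    if let Err(err) = factory.send((false, chunk)) {
        eprintln!("receiver busy, chunk not delivered: {:?}", err);
    }
}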
//! Streams
//!
//! This module contains a number of functions for working with `Stream`s
//! that return `Result`s, allowing for short-circuiting computations.
#[cfg(feature = "compat")]
use crate::compat::Compat;
use crate::fns::{
inspect_err_fn, inspect_ok_fn, into_fn, map_err_fn, map_ok_fn, InspectErrFn, InspectOkFn,
IntoFn, MapErrFn, MapOkFn,
};
use crate::future::assert_future;
use crate::stream::{assert_stream, Inspect, Map};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::pin::Pin;
use futures_core::{
future::{Future, TryFuture},
stream::TryStream,
task::{Context, Poll},
};
#[cfg(feature = "sink")]
use futures_sink::Sink;
mod and_then;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::and_then::AndThen;
delegate_all!(
/// Stream for the [`err_into`](super::TryStreamExt::err_into) method.
ErrInto<St, E>(
MapErr<St, IntoFn<E>>
): Debug + Sink + Stream + FusedStream + AccessInner[St, (.)] + New[|x: St| MapErr::new(x, into_fn())]
);
delegate_all!(
/// Stream for the [`inspect_ok`](super::TryStreamExt::inspect_ok) method.
InspectOk<St, F>(
Inspect<IntoStream<St>, InspectOkFn<F>>
): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Inspect::new(IntoStream::new(x), inspect_ok_fn(f))]
);
delegate_all!(
/// Stream for the [`inspect_err`](super::TryStreamExt::inspect_err) method.
InspectErr<St, F>(
Inspect<IntoStream<St>, InspectErrFn<F>>
): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Inspect::new(IntoStream::new(x), inspect_err_fn(f))]
);
mod into_stream;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::into_stream::IntoStream;
delegate_all!(
/// Stream for the [`map_ok`](super::TryStreamExt::map_ok) method.
MapOk<St, F>(
Map<IntoStream<St>, MapOkFn<F>>
): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Map::new(IntoStream::new(x), map_ok_fn(f))]
);
delegate_all!(
/// Stream for the [`map_err`](super::TryStreamExt::map_err) method.
MapErr<St, F>(
Map<IntoStream<St>, MapErrFn<F>>
): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Map::new(IntoStream::new(x), map_err_fn(f))]
);
mod or_else;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::or_else::OrElse;
mod try_next;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_next::TryNext;
mod try_filter;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_filter::TryFilter;
#[cfg(feature = "sink")]
mod try_forward;
#[cfg(feature = "sink")]
delegate_all!(
/// Future for the [`try_forward`](super::TryStreamExt::try_forward) method.
#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
TryForward<St, Si>(
try_forward::TryForward<St, Si, St::Ok>
): Debug + Future + FusedFuture + New[|x: St, y: Si| try_forward::TryForward::new(x, y)]
where St: TryStream
);
mod try_filter_map;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_filter_map::TryFilterMap;
mod try_flatten;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_flatten::TryFlatten;
mod try_collect;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_collect::TryCollect;
mod try_concat;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_concat::TryConcat;
#[cfg(feature = "alloc")]
mod try_chunks;
#[cfg(feature = "alloc")]
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_chunks::{TryChunks, TryChunksError};
mod try_unfold;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_unfold::{try_unfold, TryUnfold};
mod try_skip_while;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_skip_while::TrySkipWhile;
mod try_take_while;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_take_while::TryTakeWhile;
#[cfg(not(futures_no_atomic_cas))]
#[cfg(feature = "alloc")]
mod try_buffer_unordered;
#[cfg(not(futures_no_atomic_cas))]
#[cfg(feature = "alloc")]
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_buffer_unordered::TryBufferUnordered;
#[cfg(not(futures_no_atomic_cas))]
#[cfg(feature = "alloc")]
mod try_buffered;
#[cfg(not(futures_no_atomic_cas))]
#[cfg(feature = "alloc")]
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::try_buffered::TryBuffered;
#[cfg(feature = "io")]
#[cfg(feature = "std")]
mod into_async_read;
#[cfg(feature = "io")]
#[cfg_attr(docsrs, doc(cfg(feature = "io")))]
#[cfg(feature = "std")]
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::into_async_read::IntoAsyncRead;
impl<S: ?Sized + TryStream> TryStreamExt for S {}
/// Adapters specific to `Result`-returning streams
pub trait TryStreamExt: TryStream {
/// Wraps the current stream in a new stream which converts the error type
/// into the one provided.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream =
/// stream::iter(vec![Ok(()), Err(5i32)])
/// .err_into::<i64>();
///
/// assert_eq!(stream.try_next().await, Ok(Some(())));
/// assert_eq!(stream.try_next().await, Err(5i64));
/// # })
/// ```
fn err_into<E>(self) -> ErrInto<Self, E>
where
Self: Sized,
Self::Error: Into<E>,
{
assert_stream::<Result<Self::Ok, E>, _>(ErrInto::new(self))
}
/// Wraps the current stream in a new stream which maps the success value
/// using the provided closure.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream =
/// stream::iter(vec![Ok(5), Err(0)])
/// .map_ok(|x| x + 2);
///
/// assert_eq!(stream.try_next().await, Ok(Some(7)));
/// assert_eq!(stream.try_next().await, Err(0));
/// # })
/// ```
fn map_ok<T, F>(self, f: F) -> MapOk<Self, F>
where
Self: Sized,
F: FnMut(Self::Ok) -> T,
{
assert_stream::<Result<T, Self::Error>, _>(MapOk::new(self, f))
}
/// Wraps the current stream in a new stream which maps the error value
/// using the provided closure.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream =
/// stream::iter(vec![Ok(5), Err(0)])
/// .map_err(|x| x + 2);
///
/// assert_eq!(stream.try_next().await, Ok(Some(5)));
/// assert_eq!(stream.try_next().await, Err(2));
/// # })
/// ```
fn map_err<E, F>(self, f: F) -> MapErr<Self, F>
where
Self: Sized,
F: FnMut(Self::Error) -> E,
{
assert_stream::<Result<Self::Ok, E>, _>(MapErr::new(self, f))
}
/// Chain on a computation for when a value is ready, passing the successful
/// results to the provided closure `f`.
///
/// This function can be used to run a unit of work when the next successful
/// value on a stream is ready. The closure provided will be yielded a value
/// when ready, and the returned future will then be run to completion to
/// produce the next value on this stream.
///
/// Any errors produced by this stream will not be passed to the closure,
/// and will be passed through.
///
/// The returned value of the closure must implement the `TryFuture` trait
/// and can represent some more work to be done before the composed stream
/// is finished.
///
/// Note that this function consumes the receiving stream and returns a
/// wrapped version of it.
///
/// To process the entire stream and return a single future representing
/// success or error, use `try_for_each` instead.
///
/// # Examples
///
/// ```
/// use futures::channel::mpsc;
/// use futures::future;
/// use futures::stream::TryStreamExt;
///
/// let (_tx, rx) = mpsc::channel::<Result<i32, ()>>(1);
///
/// let rx = rx.and_then(|result| {
/// future::ok(if result % 2 == 0 {
/// Some(result)
/// } else {
/// None
/// })
/// });
/// ```
fn and_then<Fut, F>(self, f: F) -> AndThen<Self, Fut, F>
where
F: FnMut(Self::Ok) -> Fut,
Fut: TryFuture<Error = Self::Error>,
Self: Sized,
{
assert_stream::<Result<Fut::Ok, Fut::Error>, _>(AndThen::new(self, f))
}
/// Chain on a computation for when an error happens, passing the
/// erroneous result to the provided closure `f`.
///
/// This function can be used to run a unit of work and attempt to recover from
/// an error if one happens. The closure provided will be yielded an error
/// when one appears, and the returned future will then be run to completion
/// to produce the next value on this stream.
///
/// Any successful values produced by this stream will not be passed to the
/// closure, and will be passed through.
///
/// The returned value of the closure must implement the [`TryFuture`](futures_core::future::TryFuture) trait
/// and can represent some more work to be done before the composed stream
/// is finished.
///
/// Note that this function consumes the receiving stream and returns a
/// wrapped version of it.
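    ///
    /// # Examples
    ///
    /// A minimal illustration, recovering every error into a success value:
    ///
    /// ```
    /// # futures::executor::block_on(async {
    /// use futures::future;
    /// use futures::stream::{self, TryStreamExt};
    ///
    /// let stream = stream::iter(vec![Ok::<i32, i32>(1), Err(2), Ok(3)]);
    /// let stream = stream.or_else(|err| future::ok::<i32, i32>(err * 10));
    ///
    /// let output: Result<Vec<i32>, i32> = stream.try_collect().await;
    /// assert_eq!(output, Ok(vec![1, 20, 3]));
    /// # })
    /// ```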
fn or_else<Fut, F>(self, f: F) -> OrElse<Self, Fut, F>
where
F: FnMut(Self::Error) -> Fut,
Fut: TryFuture<Ok = Self::Ok>,
Self: Sized,
{
assert_stream::<Result<Self::Ok, Fut::Error>, _>(OrElse::new(self, f))
}
/// A future that completes after the given stream has been fully processed
/// into the sink and the sink has been flushed and closed.
///
/// This future will drive the stream to keep producing items until it is
/// exhausted, sending each item to the sink. It will complete once the
/// stream is exhausted, the sink has received and flushed all items, and
/// the sink is closed. Note that neither the original stream nor provided
/// sink will be output by this future. Pass the sink by `Pin<&mut S>`
/// (for example, via `try_forward(&mut sink)` inside an `async` fn/block) in
/// order to preserve access to the `Sink`. If the stream produces an error,
/// that error will be returned by this future without flushing/closing the sink.
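    ///
    /// # Examples
    ///
    /// A minimal sketch, using an unbounded channel sender as the sink:
    ///
    /// ```
    /// # futures::executor::block_on(async {
    /// use futures::channel::mpsc;
    /// use futures::stream::{self, StreamExt, TryStreamExt};
    ///
    /// let (tx, rx) = mpsc::unbounded();
    /// stream::iter(vec![Ok::<i32, mpsc::SendError>(1), Ok(2), Ok(3)])
    ///     .try_forward(tx)
    ///     .await
    ///     .unwrap();
    ///
    /// let received: Vec<i32> = rx.collect().await;
    /// assert_eq!(received, vec![1, 2, 3]);
    /// # })
    /// ```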
#[cfg(feature = "sink")]
#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
fn try_forward<S>(self, sink: S) -> TryForward<Self, S>
where
S: Sink<Self::Ok, Error = Self::Error>,
Self: Sized,
{
assert_future::<Result<(), Self::Error>, _>(TryForward::new(self, sink))
}
/// Do something with the success value of this stream, afterwards passing
/// it on.
///
/// This is similar to the `StreamExt::inspect` method where it allows
/// easily inspecting the success value as it passes through the stream, for
/// example to debug what's going on.
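    ///
    /// # Examples
    ///
    /// A small illustration:
    ///
    /// ```
    /// # futures::executor::block_on(async {
    /// use futures::stream::{self, TryStreamExt};
    ///
    /// let mut stream = stream::iter(vec![Ok(1), Err("error"), Ok(3)])
    ///     .inspect_ok(|x| println!("about to yield: {}", x));
    ///
    /// assert_eq!(stream.try_next().await, Ok(Some(1)));
    /// assert_eq!(stream.try_next().await, Err("error"));
    /// assert_eq!(stream.try_next().await, Ok(Some(3)));
    /// # })
    /// ```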
fn inspect_ok<F>(self, f: F) -> InspectOk<Self, F>
where
F: FnMut(&Self::Ok),
Self: Sized,
{
assert_stream::<Result<Self::Ok, Self::Error>, _>(InspectOk::new(self, f))
}
/// Do something with the error value of this stream, afterwards passing it on.
///
/// This is similar to the `StreamExt::inspect` method where it allows
/// easily inspecting the error value as it passes through the stream, for
/// example to debug what's going on.
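    ///
    /// # Examples
    ///
    /// A small illustration:
    ///
    /// ```
    /// # futures::executor::block_on(async {
    /// use futures::stream::{self, TryStreamExt};
    ///
    /// let mut stream = stream::iter(vec![Ok(1), Err("error"), Ok(3)])
    ///     .inspect_err(|e| println!("error encountered: {}", e));
    ///
    /// assert_eq!(stream.try_next().await, Ok(Some(1)));
    /// assert_eq!(stream.try_next().await, Err("error"));
    /// assert_eq!(stream.try_next().await, Ok(Some(3)));
    /// # })
    /// ```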
fn inspect_err<F>(self, f: F) -> InspectErr<Self, F>
where
F: FnMut(&Self::Error),
Self: Sized,
{
assert_stream::<Result<Self::Ok, Self::Error>, _>(InspectErr::new(self, f))
}
/// Wraps a [`TryStream`] into a type that implements
/// [`Stream`](futures_core::stream::Stream)
///
/// [`TryStream`]s currently do not implement the
/// [`Stream`](futures_core::stream::Stream) trait because of limitations
/// of the compiler.
///
/// # Examples
///
/// ```
/// use futures::stream::{Stream, TryStream, TryStreamExt};
///
/// # type T = i32;
/// # type E = ();
/// fn make_try_stream() -> impl TryStream<Ok = T, Error = E> { // ... }
/// # futures::stream::empty()
/// # }
/// fn take_stream(stream: impl Stream<Item = Result<T, E>>) { /* ... */ }
///
/// take_stream(make_try_stream().into_stream());
/// ```
fn into_stream(self) -> IntoStream<Self>
where
Self: Sized,
{
assert_stream::<Result<Self::Ok, Self::Error>, _>(IntoStream::new(self))
}
/// Creates a future that attempts to resolve the next item in the stream.
/// If an error is encountered before the next item, the error is returned
/// instead.
///
/// This is similar to the `Stream::next` combinator, but returns a
/// `Result<Option<T>, E>` rather than an `Option<Result<T, E>>`, making
/// for easy use with the `?` operator.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream = stream::iter(vec![Ok(()), Err(())]);
///
/// assert_eq!(stream.try_next().await, Ok(Some(())));
/// assert_eq!(stream.try_next().await, Err(()));
/// # })
/// ```
fn try_next(&mut self) -> TryNext<'_, Self>
where
Self: Unpin,
{
assert_future::<Result<Option<Self::Ok>, Self::Error>, _>(TryNext::new(self))
}
/// Skip elements on this stream while the provided asynchronous predicate
/// resolves to `true`.
///
/// This function is similar to
/// [`StreamExt::skip_while`](crate::stream::StreamExt::skip_while) but exits
/// early if an error occurs.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::future;
/// use futures::stream::{self, TryStreamExt};
///
/// let stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(3), Ok(2)]);
/// let stream = stream.try_skip_while(|x| future::ready(Ok(*x < 3)));
///
/// let output: Result<Vec<i32>, i32> = stream.try_collect().await;
/// assert_eq!(output, Ok(vec![3, 2]));
/// # })
/// ```
fn try_skip_while<Fut, F>(self, f: F) -> TrySkipWhile<Self, Fut, F>
where
F: FnMut(&Self::Ok) -> Fut,
Fut: TryFuture<Ok = bool, Error = Self::Error>,
Self: Sized,
{
assert_stream::<Result<Self::Ok, Self::Error>, _>(TrySkipWhile::new(self, f))
}
/// Take elements on this stream while the provided asynchronous predicate
/// resolves to `true`.
///
/// This function is similar to
/// [`StreamExt::take_while`](crate::stream::StreamExt::take_while) but exits
/// early if an error occurs.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::future;
/// use futures::stream::{self, TryStreamExt};
///
/// let stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(2), Ok(3), Ok(2)]);
/// let stream = stream.try_take_while(|x| future::ready(Ok(*x < 3)));
///
/// let output: Result<Vec<i32>, i32> = stream.try_collect().await;
/// assert_eq!(output, Ok(vec![1, 2]));
/// # })
/// ```
fn try_take_while<Fut, F>(self, f: F) -> TryTakeWhile<Self, Fut, F>
where
F: FnMut(&Self::Ok) -> Fut,
Fut: TryFuture<Ok = bool, Error = Self::Error>,
Self: Sized,
{
assert_stream::<Result<Self::Ok, Self::Error>, _>(TryTakeWhile::new(self, f))
}
/// Attempt to transform a stream into a collection,
/// returning a future representing the result of that computation.
///
/// This combinator will collect all successful results of this stream and
/// collect them into the specified collection type. If an error happens then all
/// collected elements will be dropped and the error will be returned.
///
/// The returned future will be resolved when the stream terminates.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::channel::mpsc;
/// use futures::stream::TryStreamExt;
/// use std::thread;
///
/// let (tx, rx) = mpsc::unbounded();
///
/// thread::spawn(move || {
/// for i in 1..=5 {
/// tx.unbounded_send(Ok(i)).unwrap();
/// }
/// tx.unbounded_send(Err(6)).unwrap();
/// });
///
/// let output: Result<Vec<i32>, i32> = rx.try_collect().await;
/// assert_eq!(output, Err(6));
/// # })
/// ```
fn try_collect<C: Default + Extend<Self::Ok>>(self) -> TryCollect<Self, C>
where
Self: Sized,
{
assert_future::<Result<C, Self::Error>, _>(TryCollect::new(self))
}
/// An adaptor for chunking up successful items of the stream inside a vector.
///
/// This combinator will attempt to pull successful items from this stream and buffer
/// them into a local vector. At most `capacity` items will get buffered
/// before they're yielded from the returned stream.
///
/// Note that the vectors returned from this iterator may not always have
/// `capacity` elements. If the underlying stream ended and only a partial
/// vector was created, it'll be returned. Additionally if an error happens
/// from the underlying stream then the currently buffered items will be
/// yielded.
///
/// This method is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
///
/// This function is similar to
/// [`StreamExt::chunks`](crate::stream::StreamExt::chunks) but exits
/// early if an error occurs.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryChunksError, TryStreamExt};
///
/// let stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(2), Ok(3), Err(4), Ok(5), Ok(6)]);
/// let mut stream = stream.try_chunks(2);
///
/// assert_eq!(stream.try_next().await, Ok(Some(vec![1, 2])));
/// assert_eq!(stream.try_next().await, Err(TryChunksError(vec![3], 4)));
/// assert_eq!(stream.try_next().await, Ok(Some(vec![5, 6])));
/// # })
/// ```
///
/// # Panics
///
/// This method will panic if `capacity` is zero.
#[cfg(feature = "alloc")]
fn try_chunks(self, capacity: usize) -> TryChunks<Self>
where
Self: Sized,
{
assert_stream::<Result<Vec<Self::Ok>, TryChunksError<Self::Ok, Self::Error>>, _>(
TryChunks::new(self, capacity),
)
}
/// Attempt to filter the values produced by this stream according to the
/// provided asynchronous closure.
///
/// As values of this stream are made available, the provided predicate `f`
/// will be run on them. If the predicate returns a `Future` which resolves
/// to `true`, then the stream will yield the value, but if the predicate
/// return a `Future` which resolves to `false`, then the value will be
/// discarded and the next value will be produced.
///
/// All errors are passed through without filtering in this combinator.
///
/// Note that this function consumes the stream passed into it and returns a
/// wrapped version of it, similar to the existing `filter` methods in
/// the standard library.
///
/// # Examples
/// ```
/// # futures::executor::block_on(async {
/// use futures::future;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let stream = stream::iter(vec![Ok(1i32), Ok(2i32), Ok(3i32), Err("error")]);
/// let mut evens = stream.try_filter(|x| {
/// future::ready(x % 2 == 0)
/// });
///
/// assert_eq!(evens.next().await, Some(Ok(2)));
/// assert_eq!(evens.next().await, Some(Err("error")));
/// # })
/// ```
fn try_filter<Fut, F>(self, f: F) -> TryFilter<Self, Fut, F>
where
Fut: Future<Output = bool>,
F: FnMut(&Self::Ok) -> Fut,
Self: Sized,
{
assert_stream::<Result<Self::Ok, Self::Error>, _>(TryFilter::new(self, f))
}
/// Attempt to filter the values produced by this stream while
/// simultaneously mapping them to a different type according to the
/// provided asynchronous closure.
///
/// As values of this stream are made available, the provided function will
/// be run on them. If the future returned by the predicate `f` resolves to
/// [`Some(item)`](Some) then the stream will yield the value `item`, but if
/// it resolves to [`None`] then the next value will be produced.
///
/// All errors are passed through without filtering in this combinator.
///
/// Note that this function consumes the stream passed into it and returns a
/// wrapped version of it, similar to the existing `filter_map` methods in
/// the standard library.
///
/// # Examples
/// ```
/// # futures::executor::block_on(async {
/// use futures::stream::{self, StreamExt, TryStreamExt};
/// use futures::pin_mut;
///
/// let stream = stream::iter(vec![Ok(1i32), Ok(6i32), Err("error")]);
/// let halves = stream.try_filter_map(|x| async move {
/// let ret = if x % 2 == 0 { Some(x / 2) } else { None };
/// Ok(ret)
/// });
///
/// pin_mut!(halves);
/// assert_eq!(halves.next().await, Some(Ok(3)));
/// assert_eq!(halves.next().await, Some(Err("error")));
/// # })
/// ```
fn try_filter_map<Fut, F, T>(self, f: F) -> TryFilterMap<Self, Fut, F>
where
Fut: TryFuture<Ok = Option<T>, Error = Self::Error>,
F: FnMut(Self::Ok) -> Fut,
Self: Sized,
{
assert_stream::<Result<T, Self::Error>, _>(TryFilterMap::new(self, f))
}
/// Flattens a stream of streams into just one continuous stream.
///
/// If this stream's elements are themselves streams then this combinator
/// will flatten out the entire stream to one long chain of elements. Any
/// errors are passed through without looking at them, but otherwise each
/// individual stream will get exhausted before moving on to the next.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::channel::mpsc;
/// use futures::stream::{StreamExt, TryStreamExt};
/// use std::thread;
///
/// let (tx1, rx1) = mpsc::unbounded();
/// let (tx2, rx2) = mpsc::unbounded();
/// let (tx3, rx3) = mpsc::unbounded();
///
/// thread::spawn(move || {
/// tx1.unbounded_send(Ok(1)).unwrap();
/// });
/// thread::spawn(move || {
/// tx2.unbounded_send(Ok(2)).unwrap();
/// tx2.unbounded_send(Err(3)).unwrap();
/// tx2.unbounded_send(Ok(4)).unwrap();
/// });
/// thread::spawn(move || {
/// tx3.unbounded_send(Ok(rx1)).unwrap();
/// tx3.unbounded_send(Ok(rx2)).unwrap();
/// tx3.unbounded_send(Err(5)).unwrap();
/// });
///
/// let mut stream = rx3.try_flatten();
/// assert_eq!(stream.next().await, Some(Ok(1)));
/// assert_eq!(stream.next().await, Some(Ok(2)));
/// assert_eq!(stream.next().await, Some(Err(3)));
/// assert_eq!(stream.next().await, Some(Ok(4)));
/// assert_eq!(stream.next().await, Some(Err(5)));
/// assert_eq!(stream.next().await, None);
/// # });
/// ```
fn try_flatten(self) -> TryFlatten<Self>
where
Self::Ok: TryStream,
<Self::Ok as TryStream>::Error: From<Self::Error>,
Self: Sized,
{
assert_stream::<Result<<Self::Ok as TryStream>::Ok, <Self::Ok as TryStream>::Error>, _>(
TryFlatten::new(self),
)
}
/// Attempt to concatenate all items of a stream into a single
/// extendable destination, returning a future representing the end result.
///
/// This combinator will extend the first item with the contents of all
/// the subsequent successful results of the stream. If the stream is empty,
/// the default value will be returned.
///
/// Works with all collections that implement the [`Extend`](std::iter::Extend) trait.
///
/// This method is similar to [`concat`](crate::stream::StreamExt::concat), but will
/// exit early if an error is encountered in the stream.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::channel::mpsc;
/// use futures::stream::TryStreamExt;
/// use std::thread;
///
/// let (tx, rx) = mpsc::unbounded::<Result<Vec<i32>, ()>>();
///
/// thread::spawn(move || {
/// for i in (0..3).rev() {
/// let n = i * 3;
/// tx.unbounded_send(Ok(vec![n + 1, n + 2, n + 3])).unwrap();
/// }
/// });
///
/// let result = rx.try_concat().await;
///
/// assert_eq!(result, Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
/// # });
/// ```
fn try_concat(self) -> TryConcat<Self>
where
Self: Sized,
Self::Ok: Extend<<<Self as TryStream>::Ok as IntoIterator>::Item> + IntoIterator + Default,
{
assert_future::<Result<Self::Ok, Self::Error>, _>(TryConcat::new(self))
}
/// Attempt to execute several futures from a stream concurrently (unordered).
///
/// This stream's `Ok` type must be a [`TryFuture`](futures_core::future::TryFuture) with an `Error` type
/// that matches the stream's `Error` type.
///
/// This adaptor will buffer up to `n` futures and then return their
/// outputs in the order in which they complete. If the underlying stream
/// returns an error, it will be immediately propagated.
///
/// The limit argument is of type `Into<Option<usize>>`, and so can be
/// provided as either `None`, `Some(10)`, or just `10`. Note: a limit of zero is
/// interpreted as no limit at all, and will have the same result as passing in `None`.
///
/// The returned stream will be a stream of results, each containing either
/// an error or a future's output. An error can be produced either by the
/// underlying stream itself or by one of the futures it yielded.
///
/// This method is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
///
/// # Examples
///
/// Results are returned in the order of completion:
/// ```
/// # futures::executor::block_on(async {
/// use futures::channel::oneshot;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let (send_one, recv_one) = oneshot::channel();
/// let (send_two, recv_two) = oneshot::channel();
///
/// let stream_of_futures = stream::iter(vec![Ok(recv_one), Ok(recv_two)]);
///
/// let mut buffered = stream_of_futures.try_buffer_unordered(10);
///
/// send_two.send(2i32)?;
/// assert_eq!(buffered.next().await, Some(Ok(2i32)));
///
/// send_one.send(1i32)?;
/// assert_eq!(buffered.next().await, Some(Ok(1i32)));
///
/// assert_eq!(buffered.next().await, None);
/// # Ok::<(), i32>(()) }).unwrap();
/// ```
///
/// Errors from the underlying stream itself are propagated:
/// ```
/// # futures::executor::block_on(async {
/// use futures::channel::mpsc;
/// use futures::stream::{StreamExt, TryStreamExt};
///
/// let (sink, stream_of_futures) = mpsc::unbounded();
/// let mut buffered = stream_of_futures.try_buffer_unordered(10);
///
/// sink.unbounded_send(Ok(async { Ok(7i32) }))?;
/// assert_eq!(buffered.next().await, Some(Ok(7i32)));
///
/// sink.unbounded_send(Err("error in the stream"))?;
/// assert_eq!(buffered.next().await, Some(Err("error in the stream")));
/// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
/// ```
#[cfg(not(futures_no_atomic_cas))]
#[cfg(feature = "alloc")]
fn try_buffer_unordered(self, n: impl Into<Option<usize>>) -> TryBufferUnordered<Self>
where
Self::Ok: TryFuture<Error = Self::Error>,
Self: Sized,
{
assert_stream::<Result<<Self::Ok as TryFuture>::Ok, Self::Error>, _>(
TryBufferUnordered::new(self, n.into()),
)
}
/// Attempt to execute several futures from a stream concurrently.
///
/// This stream's `Ok` type must be a [`TryFuture`](futures_core::future::TryFuture) with an `Error` type
/// that matches the stream's `Error` type.
///
/// This adaptor will buffer up to `n` futures and then return their
/// outputs in the same order as the underlying stream. If the underlying stream returns an error, it will
/// be immediately propagated.
///
/// The limit argument is of type `Into<Option<usize>>`, and so can be
/// provided as either `None`, `Some(10)`, or just `10`. Note: a limit of zero is
/// interpreted as no limit at all, and will have the same result as passing in `None`.
///
/// The returned stream will be a stream of results, each containing either
/// an error or a future's output. An error can be produced either by the
/// underlying stream itself or by one of the futures it yielded.
///
/// This method is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
///
/// # Examples
///
/// Results are returned in the order of addition:
/// ```
/// # futures::executor::block_on(async {
/// use futures::channel::oneshot;
/// use futures::future::lazy;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let (send_one, recv_one) = oneshot::channel();
/// let (send_two, recv_two) = oneshot::channel();
///
/// let mut buffered = lazy(move |cx| {
/// let stream_of_futures = stream::iter(vec![Ok(recv_one), Ok(recv_two)]);
///
/// let mut buffered = stream_of_futures.try_buffered(10);
///
/// assert!(buffered.try_poll_next_unpin(cx).is_pending());
///
/// send_two.send(2i32)?;
/// assert!(buffered.try_poll_next_unpin(cx).is_pending());
/// Ok::<_, i32>(buffered)
/// }).await?;
///
/// send_one.send(1i32)?;
/// assert_eq!(buffered.next().await, Some(Ok(1i32)));
/// assert_eq!(buffered.next().await, Some(Ok(2i32)));
///
/// assert_eq!(buffered.next().await, None);
/// # Ok::<(), i32>(()) }).unwrap();
/// ```
///
/// Errors from the underlying stream itself are propagated:
/// ```
/// # futures::executor::block_on(async {
/// use futures::channel::mpsc;
/// use futures::stream::{StreamExt, TryStreamExt};
///
/// let (sink, stream_of_futures) = mpsc::unbounded();
/// let mut buffered = stream_of_futures.try_buffered(10);
///
/// sink.unbounded_send(Ok(async { Ok(7i32) }))?;
/// assert_eq!(buffered.next().await, Some(Ok(7i32)));
///
/// sink.unbounded_send(Err("error in the stream"))?;
/// assert_eq!(buffered.next().await, Some(Err("error in the stream")));
/// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
/// ```
#[cfg(not(futures_no_atomic_cas))]
#[cfg(feature = "alloc")]
fn try_buffered(self, n: impl Into<Option<usize>>) -> TryBuffered<Self>
where
Self::Ok: TryFuture<Error = Self::Error>,
Self: Sized,
{
assert_stream::<Result<<Self::Ok as TryFuture>::Ok, Self::Error>, _>(TryBuffered::new(
self,
n.into(),
))
}
// TODO: false positive warning from rustdoc. Verify once #43466 settles
//
/// A convenience method for calling [`TryStream::try_poll_next`] on [`Unpin`]
/// stream types.
fn try_poll_next_unpin(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Self::Ok, Self::Error>>>
where
Self: Unpin,
{
Pin::new(self).try_poll_next(cx)
}
/// Wraps a [`TryStream`] into a stream compatible with libraries using
/// futures 0.1 `Stream`. Requires the `compat` feature to be enabled.
/// ```
/// # if cfg!(miri) { return; } // Miri does not support epoll
/// use futures::future::{FutureExt, TryFutureExt};
/// # let (tx, rx) = futures::channel::oneshot::channel();
///
/// let future03 = async {
/// println!("Running on the pool");
/// tx.send(42).unwrap();
/// };
///
/// let future01 = future03
/// .unit_error() // Make it a TryFuture
/// .boxed() // Make it Unpin
/// .compat();
///
/// tokio::run(future01);
/// # assert_eq!(42, futures::executor::block_on(rx).unwrap());
/// ```
#[cfg(feature = "compat")]
#[cfg_attr(docsrs, doc(cfg(feature = "compat")))]
fn compat(self) -> Compat<Self>
where
Self: Sized + Unpin,
{
Compat::new(self)
}
/// Adapter that converts this stream into an [`AsyncRead`](crate::io::AsyncRead).
///
/// Note that because `into_async_read` moves the stream, the [`Stream`](futures_core::stream::Stream) type must be
/// [`Unpin`]. If you want to use `into_async_read` with a [`!Unpin`](Unpin) stream, you'll
/// first have to pin the stream. This can be done by boxing the stream using [`Box::pin`]
/// or pinning it to the stack using the `pin_mut!` macro from the `pin_utils` crate.
///
/// This method is only available when the `std` feature of this
/// library is activated, and it is activated by default.
///
/// # Examples
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
/// use futures::io::AsyncReadExt;
///
/// let stream = stream::iter(vec![Ok(vec![1, 2, 3, 4, 5])]);
/// let mut reader = stream.into_async_read();
/// let mut buf = Vec::new();
///
/// assert!(reader.read_to_end(&mut buf).await.is_ok());
/// assert_eq!(buf, &[1, 2, 3, 4, 5]);
/// # })
/// ```
#[cfg(feature = "io")]
#[cfg_attr(docsrs, doc(cfg(feature = "io")))]
#[cfg(feature = "std")]
fn into_async_read(self) -> IntoAsyncRead<Self>
where
Self: Sized + TryStreamExt<Error = std::io::Error> + Unpin,
Self::Ok: AsRef<[u8]>,
{
crate::io::assert_read(IntoAsyncRead::new(self))
}
}
| 36.683003 | 140 | 0.593223 |
8a789e89c89030194e61206fbaff85a730680890 | 1,467 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
impl super::FSM_SAV_PPUL {
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R { bits: self.register.get() }
}
}
#[doc = r" Value of the field"]
pub struct RESERVED12R {
bits: u32,
}
impl RESERVED12R {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct SAV_P_PULR {
bits: u16,
}
impl SAV_P_PULR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 12:31 - Internal. Only to be used through TI provided API."]
#[inline]
pub fn reserved12(&self) -> RESERVED12R {
let bits = {
const MASK: u32 = 1048575;
const OFFSET: u8 = 12;
((self.bits >> OFFSET) & MASK as u32) as u32
};
RESERVED12R { bits }
}
#[doc = "Bits 0:11 - Internal. Only to be used through TI provided API."]
#[inline]
pub fn sav_p_pul(&self) -> SAV_P_PULR {
let bits = {
const MASK: u16 = 4095;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
SAV_P_PULR { bits }
}
}
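// A minimal usage sketch (assumes an svd2rust-style peripheral instance named `flash`,
// which is hypothetical; only the accessor chain below is defined in this file):
//
//     let reg = flash.fsm_sav_ppul.read();
//     let pul: u16 = reg.sav_p_pul().bits(); // bits 0:11 of the register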
| 24.04918 | 78 | 0.528289 |
118654d1585487b341e9a293e9a1098946581a7f | 7,143 | use sqlx::pool::PoolOptions;
use sqlx::{Connection, Database, Pool};
use std::env;
pub fn setup_if_needed() {
let _ = dotenv::dotenv();
let _ = env_logger::builder().is_test(true).try_init();
}
// Make a new connection
// Ensure [dotenv] and [env_logger] have been set up
pub async fn new<DB>() -> anyhow::Result<DB::Connection>
where
DB: Database,
{
setup_if_needed();
Ok(DB::Connection::connect(&env::var("DATABASE_URL")?).await?)
}
// Make a new pool
// Ensure [dotenv] and [env_logger] have been setup
pub async fn pool<DB>() -> anyhow::Result<Pool<DB>>
where
DB: Database,
{
setup_if_needed();
let pool = PoolOptions::<DB>::new()
.min_connections(0)
.max_connections(5)
.test_before_acquire(true)
.connect(&env::var("DATABASE_URL")?)
.await?;
Ok(pool)
}
// Test type encoding and decoding
#[macro_export]
macro_rules! test_type {
($name:ident<$ty:ty>($db:ident, $sql:literal, $($text:literal == $value:expr),+ $(,)?)) => {
$crate::__test_prepared_type!($name<$ty>($db, $sql, $($text == $value),+));
$crate::test_unprepared_type!($name<$ty>($db, $($text == $value),+));
};
($name:ident<$ty:ty>($db:ident, $($text:literal == $value:expr),+ $(,)?)) => {
paste::item! {
$crate::__test_prepared_type!($name<$ty>($db, $crate::[< $db _query_for_test_prepared_type >]!(), $($text == $value),+));
$crate::test_unprepared_type!($name<$ty>($db, $($text == $value),+));
}
};
($name:ident($db:ident, $($text:literal == $value:expr),+ $(,)?)) => {
$crate::test_type!($name<$name>($db, $($text == $value),+));
};
}
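// Example invocation (hypothetical; the SQL literal / Rust value pairs depend on the
// driver's type support):
//
//     test_type!(test_bool<bool>(Postgres, "false::boolean" == false, "true::boolean" == true));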
// Test type decoding only
#[macro_export]
macro_rules! test_decode_type {
($name:ident<$ty:ty>($db:ident, $($text:literal == $value:expr),+ $(,)?)) => {
$crate::__test_prepared_decode_type!($name<$ty>($db, $($text == $value),+));
$crate::test_unprepared_type!($name<$ty>($db, $($text == $value),+));
};
($name:ident($db:ident, $($text:literal == $value:expr),+ $(,)?)) => {
$crate::test_decode_type!($name<$name>($db, $($text == $value),+));
};
}
// Test type encoding and decoding
#[macro_export]
macro_rules! test_prepared_type {
($name:ident<$ty:ty>($db:ident, $sql:literal, $($text:literal == $value:expr),+ $(,)?)) => {
$crate::__test_prepared_type!($name<$ty>($db, $sql, $($text == $value),+));
};
($name:ident<$ty:ty>($db:ident, $($text:literal == $value:expr),+ $(,)?)) => {
paste::item! {
$crate::__test_prepared_type!($name<$ty>($db, $crate::[< $db _query_for_test_prepared_type >]!(), $($text == $value),+));
}
};
($name:ident($db:ident, $($text:literal == $value:expr),+ $(,)?)) => {
$crate::__test_prepared_type!($name<$name>($db, $($text == $value),+));
};
}
// Test type decoding for the simple (unprepared) query API
#[macro_export]
macro_rules! test_unprepared_type {
($name:ident<$ty:ty>($db:ident, $($text:literal == $value:expr),+ $(,)?)) => {
paste::item! {
#[sqlx_macros::test]
async fn [< test_unprepared_type_ $name >] () -> anyhow::Result<()> {
use sqlx::prelude::*;
use futures::TryStreamExt;
let mut conn = sqlx_test::new::<$db>().await?;
$(
let query = format!("SELECT {}", $text);
let mut s = conn.fetch(&*query);
let row = s.try_next().await?.unwrap();
let rec = row.try_get::<$ty, _>(0)?;
assert_eq!($value, rec);
drop(s);
)+
Ok(())
}
}
}
}
// Test type decoding only for the prepared query API
#[macro_export]
macro_rules! __test_prepared_decode_type {
($name:ident<$ty:ty>($db:ident, $($text:literal == $value:expr),+ $(,)?)) => {
paste::item! {
#[sqlx_macros::test]
async fn [< test_prepared_decode_type_ $name >] () -> anyhow::Result<()> {
use sqlx::Row;
let mut conn = sqlx_test::new::<$db>().await?;
$(
let query = format!("SELECT {}", $text);
let row = sqlx::query(&query)
.fetch_one(&mut conn)
.await?;
let rec: $ty = row.try_get(0)?;
assert_eq!($value, rec);
)+
Ok(())
}
}
};
}
// Test type encoding and decoding for the prepared query API
#[macro_export]
macro_rules! __test_prepared_type {
($name:ident<$ty:ty>($db:ident, $sql:expr, $($text:literal == $value:expr),+ $(,)?)) => {
paste::item! {
#[sqlx_macros::test]
async fn [< test_prepared_type_ $name >] () -> anyhow::Result<()> {
use sqlx::Row;
let mut conn = sqlx_test::new::<$db>().await?;
$(
let query = format!($sql, $text);
println!("{query}");
let row = sqlx::query(&query)
.bind($value)
.bind($value)
.fetch_one(&mut conn)
.await?;
let matches: i32 = row.try_get(0)?;
let returned: $ty = row.try_get(1)?;
let round_trip: $ty = row.try_get(2)?;
assert!(matches != 0,
"[1] DB value mismatch; given value: {:?}\n\
as returned: {:?}\n\
round-trip: {:?}",
$value, returned, round_trip);
assert_eq!($value, returned,
"[2] DB value mismatch; given value: {:?}\n\
as returned: {:?}\n\
round-trip: {:?}",
$value, returned, round_trip);
assert_eq!($value, round_trip,
"[3] DB value mismatch; given value: {:?}\n\
as returned: {:?}\n\
round-trip: {:?}",
$value, returned, round_trip);
)+
Ok(())
}
}
};
}
#[macro_export]
macro_rules! MySql_query_for_test_prepared_type {
() => {
// MySQL 8.0.27 changed `<=>` to return an unsigned integer
"SELECT CAST({0} <=> ? AS SIGNED INTEGER), {0}, ?"
};
}
#[macro_export]
macro_rules! Mssql_query_for_test_prepared_type {
() => {
"SELECT CASE WHEN {0} IS NULL AND @p1 IS NULL THEN 1 WHEN {0} = @p1 THEN 1 ELSE 0 END, {0}, @p2"
};
}
#[macro_export]
macro_rules! Sqlite_query_for_test_prepared_type {
() => {
"SELECT {0} is ?, {0}, ?"
};
}
#[macro_export]
macro_rules! Postgres_query_for_test_prepared_type {
() => {
"SELECT ({0} is not distinct from $1)::int4, {0}, $2"
};
}
| 31.606195 | 133 | 0.47739 |
d9e069ab012a777804e2ad199ca2f534325ec785 | 3,440 | <?xml version="1.0" encoding="UTF-8"?>
<WebElementEntity>
<description></description>
<name>div_Home Websites</name>
<tag></tag>
<elementGuidId>84328dd1-78d7-431e-a3cc-e02a741d59a7</elementGuidId>
<selectorCollection>
<entry>
<key>XPATH</key>
<value>//div[@id='main-view']/div/div[3]/div[3]/ul/li/a/div[2]/div[2]/div</value>
</entry>
<entry>
<key>CSS</key>
<value>#main-view > div > div.page-content > div.components-list.searchbar-found.list > ul > li:nth-child(1) > a > div > div.item-inner > div</value>
</entry>
</selectorCollection>
<selectorMethod>CSS</selectorMethod>
<useRalativeImagePath>false</useRalativeImagePath>
<webElementProperties>
<isSelected>true</isSelected>
<matchCondition>equals</matchCondition>
<name>tag</name>
<type>Main</type>
<value>div</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>class</name>
<type>Main</type>
<value>item-title</value>
</webElementProperties>
<webElementProperties>
<isSelected>true</isSelected>
<matchCondition>equals</matchCondition>
<name>text</name>
<type>Main</type>
<value>Home Websites</value>
</webElementProperties>
<webElementProperties>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath</name>
<type>Main</type>
<value>id("main-view")/div[@class="page page-current"]/div[@class="page-content"]/div[@class="components-list searchbar-found list"]/ul[1]/li[1]/a[@class="item-link"]/div[@class="item-content"]/div[@class="item-inner"]/div[@class="item-title"]</value>
</webElementProperties>
<webElementXpaths>
<isSelected>true</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath:idRelative</name>
<value>//div[@id='main-view']/div/div[3]/div[3]/ul/li/a/div[2]/div[2]/div</value>
</webElementXpaths>
<webElementXpaths>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath:neighbor</name>
<value>(.//*[normalize-space(text()) and normalize-space(.)='vpn_lock'])[1]/following::div[2]</value>
</webElementXpaths>
<webElementXpaths>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath:neighbor</name>
<value>(.//*[normalize-space(text()) and normalize-space(.)='Components'])[1]/following::div[6]</value>
</webElementXpaths>
<webElementXpaths>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath:neighbor</name>
<value>(.//*[normalize-space(text()) and normalize-space(.)='network_wifi'])[1]/preceding::div[1]</value>
</webElementXpaths>
<webElementXpaths>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath:neighbor</name>
<value>(.//*[normalize-space(text()) and normalize-space(.)='Live Websites'])[1]/preceding::div[2]</value>
</webElementXpaths>
<webElementXpaths>
<isSelected>false</isSelected>
<matchCondition>equals</matchCondition>
<name>xpath:position</name>
<value>//div[2]/div[2]/div</value>
</webElementXpaths>
</WebElementEntity>
| 40.952381 | 337 | 0.661628 |
08c9a337fdd3b270a7cda48cbffa8a2bf9400f59 | 3,770 | use std::fmt;
use std::fs::{File, OpenOptions};
use std::io::prelude::*;
use std::io::SeekFrom;
use std::path::PathBuf;
use serde::de::{self, Deserialize, Deserializer, SeqAccess, Visitor};
use serde::ser::{Serialize, SerializeStruct, Serializer};
use super::BackupMemoryInterface;
use crate::util::write_bin_file;
#[derive(Debug)]
pub struct BackupFile {
size: usize,
path: Option<PathBuf>,
file: Option<File>,
buffer: Vec<u8>,
}
impl Clone for BackupFile {
fn clone(&self) -> Self {
BackupFile::new(self.size, self.path.clone())
}
}
impl Serialize for BackupFile {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("BackupFile", 2)?;
state.serialize_field("size", &self.size)?;
state.serialize_field("path", &self.path)?;
state.end()
}
}
impl<'de> Deserialize<'de> for BackupFile {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct BackupFileVisitor;
impl<'de> Visitor<'de> for BackupFileVisitor {
type Value = BackupFile;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("struct BackupFile")
}
fn visit_seq<V>(self, mut seq: V) -> Result<BackupFile, V::Error>
where
V: SeqAccess<'de>,
{
let size = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(0, &self))?;
let path: Option<PathBuf> = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(1, &self))?;
Ok(BackupFile::new(size, path))
}
}
const FIELDS: &'static [&'static str] = &["size", "path"];
deserializer.deserialize_struct("BackupFile", FIELDS, BackupFileVisitor)
}
}
impl BackupFile {
pub fn new(size: usize, path: Option<PathBuf>) -> BackupFile {
// TODO handle errors without unwrap
let mut file: Option<File> = None;
let buffer = if let Some(path) = &path {
if !path.is_file() {
write_bin_file(&path, &vec![0xff; size]).unwrap();
}
let mut _file = OpenOptions::new()
.read(true)
.write(true)
.open(&path)
.unwrap();
let mut buffer = Vec::new();
_file.read_to_end(&mut buffer).unwrap();
buffer.resize(size, 0xff);
file = Some(_file);
buffer
} else {
vec![0xff; size]
};
BackupFile {
size,
path,
            file,
            buffer,
}
}
pub fn bytes(&self) -> &[u8] {
&self.buffer
}
pub fn bytes_mut(&mut self) -> &mut [u8] {
&mut self.buffer
}
pub fn flush(&mut self) {
if let Some(file) = &mut self.file {
file.seek(SeekFrom::Start(0)).unwrap();
file.write_all(&self.buffer).unwrap();
}
}
}
impl BackupMemoryInterface for BackupFile {
fn write(&mut self, offset: usize, value: u8) {
self.buffer[offset] = value;
if let Some(file) = &mut self.file {
file.seek(SeekFrom::Start(offset as u64)).unwrap();
file.write_all(&[value]).unwrap();
}
}
fn read(&self, offset: usize) -> u8 {
self.buffer[offset]
}
fn resize(&mut self, new_size: usize) {
self.size = new_size;
self.buffer.resize(new_size, 0xff);
self.flush();
}
}
| 26.737589 | 84 | 0.529708 |
2fdc0dfa00eaf29b99e641624eb0b913dc9afa9c | 5,268 | // Generated from definition io.k8s.api.core.v1.PodAffinityTerm
/// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \<topologyKey\> matches that of any node on which a pod of the set of pods is running
#[derive(Clone, Debug, Default, PartialEq)]
pub struct PodAffinityTerm {
/// A label query over a set of resources, in this case pods.
pub label_selector: Option<crate::v1_12::apimachinery::pkg::apis::meta::v1::LabelSelector>,
/// namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
pub namespaces: Option<Vec<String>>,
/// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
pub topology_key: String,
}
impl<'de> serde::Deserialize<'de> for PodAffinityTerm {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_label_selector,
Key_namespaces,
Key_topology_key,
Other,
}
impl<'de> serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
Ok(match v {
"labelSelector" => Field::Key_label_selector,
"namespaces" => Field::Key_namespaces,
"topologyKey" => Field::Key_topology_key,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = PodAffinityTerm;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "struct PodAffinityTerm")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
let mut value_label_selector: Option<crate::v1_12::apimachinery::pkg::apis::meta::v1::LabelSelector> = None;
let mut value_namespaces: Option<Vec<String>> = None;
let mut value_topology_key: Option<String> = None;
while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_label_selector => value_label_selector = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_namespaces => value_namespaces = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_topology_key => value_topology_key = Some(serde::de::MapAccess::next_value(&mut map)?),
Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(PodAffinityTerm {
label_selector: value_label_selector,
namespaces: value_namespaces,
topology_key: value_topology_key.ok_or_else(|| serde::de::Error::missing_field("topologyKey"))?,
})
}
}
deserializer.deserialize_struct(
"PodAffinityTerm",
&[
"labelSelector",
"namespaces",
"topologyKey",
],
Visitor,
)
}
}
impl serde::Serialize for PodAffinityTerm {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
let mut state = serializer.serialize_struct(
"PodAffinityTerm",
1 +
self.label_selector.as_ref().map_or(0, |_| 1) +
self.namespaces.as_ref().map_or(0, |_| 1),
)?;
if let Some(value) = &self.label_selector {
serde::ser::SerializeStruct::serialize_field(&mut state, "labelSelector", value)?;
}
if let Some(value) = &self.namespaces {
serde::ser::SerializeStruct::serialize_field(&mut state, "namespaces", value)?;
}
serde::ser::SerializeStruct::serialize_field(&mut state, "topologyKey", &self.topology_key)?;
serde::ser::SerializeStruct::end(state)
}
}
| 47.035714 | 357 | 0.574791 |
d6453f5b048b6739654f647ed5cf32cddfdaf59e | 214,066 | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
// #[PerformanceCriticalPath]
use std::borrow::Cow;
use std::cell::Cell;
use std::collections::Bound::{Excluded, Unbounded};
use std::collections::VecDeque;
use std::iter::Iterator;
use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc};
use std::time::Instant;
use std::{cmp, mem, u64};
use batch_system::{BasicMailbox, Fsm};
use collections::{HashMap, HashSet};
use engine_traits::{
Engines, KvEngine, RaftEngine, SSTMetaInfo, WriteBatch, WriteBatchExt, WriteOptions,
};
use engine_traits::{CF_LOCK, CF_RAFT};
use error_code::ErrorCodeExt;
use fail::fail_point;
use keys::{self, enc_end_key, enc_start_key};
use kvproto::errorpb;
use kvproto::import_sstpb::SwitchMode;
use kvproto::kvrpcpb::DiskFullOpt;
use kvproto::metapb::{self, Region, RegionEpoch};
use kvproto::pdpb::{CheckPolicy, StoreStats};
use kvproto::raft_cmdpb::{
AdminCmdType, AdminRequest, CmdType, PutRequest, RaftCmdRequest, RaftCmdResponse, Request,
StatusCmdType, StatusResponse,
};
use kvproto::raft_serverpb::{
ExtraMessage, ExtraMessageType, MergeState, PeerState, RaftApplyState, RaftMessage,
RaftSnapshotData, RaftTruncatedState, RegionLocalState,
};
use kvproto::replication_modepb::{DrAutoSyncState, ReplicationMode};
use parking_lot::RwLockWriteGuard;
use protobuf::Message;
use raft::eraftpb::{self, ConfChangeType, MessageType};
use raft::{
self, GetEntriesContext, Progress, ReadState, SnapshotStatus, StateRole, INVALID_INDEX,
NO_LIMIT,
};
use smallvec::SmallVec;
use tikv_alloc::trace::TraceEvent;
use tikv_util::mpsc::{self, LooseBoundedSender, Receiver};
use tikv_util::sys::disk::DiskUsage;
use tikv_util::sys::memory_usage_reaches_high_water;
use tikv_util::time::{duration_to_sec, monotonic_raw_now, Instant as TiInstant};
use tikv_util::worker::{ScheduleError, Scheduler};
use tikv_util::{box_err, debug, defer, error, info, trace, warn};
use tikv_util::{escape, is_zero_duration, Either};
use txn_types::WriteBatchFlags;
use self::memtrace::*;
use crate::coprocessor::RegionChangeEvent;
use crate::store::cmd_resp::{bind_term, new_error};
use crate::store::fsm::store::{PollContext, StoreMeta};
use crate::store::fsm::{
apply, ApplyMetrics, ApplyTask, ApplyTaskRes, CatchUpLogs, ChangeObserver, ChangePeer,
ExecResult, StoreInfo,
};
use crate::store::hibernate_state::{GroupState, HibernateState};
use crate::store::local_metrics::RaftMetrics;
use crate::store::memory::*;
use crate::store::metrics::*;
use crate::store::msg::{Callback, ExtCallback, InspectedRaftMessage};
use crate::store::peer::{
ConsistencyState, Peer, PersistSnapshotResult, StaleState, TRANSFER_LEADER_COMMAND_REPLY_CTX,
};
use crate::store::peer_storage::write_peer_state;
use crate::store::transport::Transport;
use crate::store::util::{is_learner, KeysInfoFormatter};
use crate::store::worker::{
ConsistencyCheckTask, RaftlogFetchTask, RaftlogGcTask, ReadDelegate, RegionTask, SplitCheckTask,
};
use crate::store::PdTask;
use crate::store::{
util, AbstractPeer, CasualMessage, Config, MergeResultKind, PeerMsg, PeerTick,
RaftCmdExtraOpts, RaftCommand, SignificantMsg, SnapKey, StoreMsg,
};
use crate::{Error, Result};
#[derive(Clone, Copy, Debug)]
pub struct DelayDestroy {
merged_by_target: bool,
reason: DelayReason,
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum DelayReason {
UnPersistedReady,
UnFlushLogGc,
Shutdown,
}
/// Limits the maximum number of regions returned in an error.
///
/// Another choice is using the coprocessor batch limit, but 10 should be a good fit in most cases.
const MAX_REGIONS_IN_ERROR: usize = 10;
const REGION_SPLIT_SKIP_MAX_COUNT: usize = 3;
pub struct DestroyPeerJob {
pub initialized: bool,
pub region_id: u64,
pub peer: metapb::Peer,
}
pub struct PeerFsm<EK, ER>
where
EK: KvEngine,
ER: RaftEngine,
{
pub peer: Peer<EK, ER>,
    /// A registry for all scheduled ticks. This prevents a tick from being scheduled twice accidentally.
tick_registry: [bool; PeerTick::VARIANT_COUNT],
/// Ticks for speed up campaign in chaos state.
///
/// Followers will keep ticking in Idle mode to measure how many ticks have been skipped.
/// Once it becomes chaos, those skipped ticks will be ticked so that it can campaign
/// quickly instead of waiting an election timeout.
///
/// This will be reset to 0 once it receives any messages from leader.
missing_ticks: usize,
hibernate_state: HibernateState,
stopped: bool,
has_ready: bool,
mailbox: Option<BasicMailbox<PeerFsm<EK, ER>>>,
pub receiver: Receiver<PeerMsg<EK>>,
    /// While a snapshot is being generated or sent, skip the split check at most REGION_SPLIT_SKIP_MAX_COUNT times.
skip_split_count: usize,
    /// Sometimes applied raft logs won't be compacted in time, because compacting less often
    /// means fewer sync-log operations in the apply threads. Stale logs will be deleted once
    /// the number of skipped ticks reaches `skip_gc_raft_log_ticks`.
skip_gc_raft_log_ticks: usize,
    /// Batches raft commands that share the same header into a single entry.
batch_req_builder: BatchRaftCmdRequestBuilder<EK>,
trace: PeerMemoryTrace,
/// Destroy is delayed because of some unpersisted readies in Peer.
/// Should call `destroy_peer` again after persisting all readies.
delayed_destroy: Option<DelayDestroy>,
/// Before actually destroying a peer, ensure all log gc tasks are finished, so we
/// can start destroying without seeking.
logs_gc_flushed: bool,
    /// To make sure the reported store/peer meta is up to date, each peer has to wait for the log
    /// at its target commit index to be applied. The last peer to do so triggers the next
    /// procedure, which is reporting the store/peer meta to PD.
unsafe_recovery_target_commit_index: Option<u64>,
unsafe_recovery_wait_apply_counter: Option<Arc<AtomicUsize>>,
}
pub struct BatchRaftCmdRequestBuilder<E>
where
E: KvEngine,
{
batch_req_size: u64,
    /// True if any callback in the current batch carries a "proposed" hook.
    has_proposed_cb: bool,
    /// Tri-state: `None` means the batch has not been pre-checked yet, `Some(false)` means
    /// it was checked but the proposed hooks have not fired, `Some(true)` means they have.
    propose_checked: Option<bool>,
request: Option<RaftCmdRequest>,
callbacks: Vec<Callback<E::Snapshot>>,
}
impl<EK, ER> Drop for PeerFsm<EK, ER>
where
EK: KvEngine,
ER: RaftEngine,
{
fn drop(&mut self) {
self.peer.stop();
let mut raft_messages_size = 0;
while let Ok(msg) = self.receiver.try_recv() {
let callback = match msg {
PeerMsg::RaftCommand(cmd) => cmd.callback,
PeerMsg::CasualMessage(CasualMessage::SplitRegion { callback, .. }) => callback,
PeerMsg::RaftMessage(im) => {
raft_messages_size += im.heap_size;
continue;
}
_ => continue,
};
let mut err = errorpb::Error::default();
err.set_message("region is not found".to_owned());
err.mut_region_not_found().set_region_id(self.region_id());
let mut resp = RaftCmdResponse::default();
resp.mut_header().set_error(err);
callback.invoke_with_response(resp);
}
(match self.hibernate_state.group_state() {
GroupState::Idle | GroupState::PreChaos => &HIBERNATED_PEER_STATE_GAUGE.hibernated,
_ => &HIBERNATED_PEER_STATE_GAUGE.awaken,
})
.dec();
MEMTRACE_RAFT_MESSAGES.trace(TraceEvent::Sub(raft_messages_size));
MEMTRACE_RAFT_ENTRIES.trace(TraceEvent::Sub(self.peer.memtrace_raft_entries));
let mut event = TraceEvent::default();
if let Some(e) = self.trace.reset(PeerMemoryTrace::default()) {
event = event + e;
}
MEMTRACE_PEERS.trace(event);
}
}
pub type SenderFsmPair<EK, ER> = (LooseBoundedSender<PeerMsg<EK>>, Box<PeerFsm<EK, ER>>);
impl<EK, ER> PeerFsm<EK, ER>
where
EK: KvEngine,
ER: RaftEngine,
{
// If we create the peer actively, like bootstrap/split/merge region, we should
// use this function to create the peer. The region must contain the peer info
// for this store.
pub fn create(
store_id: u64,
cfg: &Config,
region_scheduler: Scheduler<RegionTask<EK::Snapshot>>,
raftlog_fetch_scheduler: Scheduler<RaftlogFetchTask>,
engines: Engines<EK, ER>,
region: &metapb::Region,
) -> Result<SenderFsmPair<EK, ER>> {
let meta_peer = match util::find_peer(region, store_id) {
None => {
return Err(box_err!(
"find no peer for store {} in region {:?}",
store_id,
region
));
}
Some(peer) => peer.clone(),
};
info!(
"create peer";
"region_id" => region.get_id(),
"peer_id" => meta_peer.get_id(),
);
HIBERNATED_PEER_STATE_GAUGE.awaken.inc();
let (tx, rx) = mpsc::loose_bounded(cfg.notify_capacity);
Ok((
tx,
Box::new(PeerFsm {
peer: Peer::new(
store_id,
cfg,
region_scheduler,
raftlog_fetch_scheduler,
engines,
region,
meta_peer,
)?,
tick_registry: [false; PeerTick::VARIANT_COUNT],
missing_ticks: 0,
hibernate_state: HibernateState::ordered(),
stopped: false,
has_ready: false,
mailbox: None,
receiver: rx,
skip_split_count: 0,
skip_gc_raft_log_ticks: 0,
batch_req_builder: BatchRaftCmdRequestBuilder::new(),
trace: PeerMemoryTrace::default(),
delayed_destroy: None,
logs_gc_flushed: false,
unsafe_recovery_target_commit_index: None,
unsafe_recovery_wait_apply_counter: None,
}),
))
}
    // The peer can be created from another node with raft membership changes, and we only
    // know the region_id and peer_id when creating this replicated peer; the region info
    // will be retrieved later after applying the snapshot.
pub fn replicate(
store_id: u64,
cfg: &Config,
region_scheduler: Scheduler<RegionTask<EK::Snapshot>>,
raftlog_fetch_scheduler: Scheduler<RaftlogFetchTask>,
engines: Engines<EK, ER>,
region_id: u64,
peer: metapb::Peer,
) -> Result<SenderFsmPair<EK, ER>> {
        // The tombstone key will be removed when the snapshot is applied.
info!(
"replicate peer";
"region_id" => region_id,
"peer_id" => peer.get_id(),
);
let mut region = metapb::Region::default();
region.set_id(region_id);
HIBERNATED_PEER_STATE_GAUGE.awaken.inc();
let (tx, rx) = mpsc::loose_bounded(cfg.notify_capacity);
Ok((
tx,
Box::new(PeerFsm {
peer: Peer::new(
store_id,
cfg,
region_scheduler,
raftlog_fetch_scheduler,
engines,
®ion,
peer,
)?,
tick_registry: [false; PeerTick::VARIANT_COUNT],
missing_ticks: 0,
hibernate_state: HibernateState::ordered(),
stopped: false,
has_ready: false,
mailbox: None,
receiver: rx,
skip_split_count: 0,
skip_gc_raft_log_ticks: 0,
batch_req_builder: BatchRaftCmdRequestBuilder::new(),
trace: PeerMemoryTrace::default(),
delayed_destroy: None,
logs_gc_flushed: false,
unsafe_recovery_target_commit_index: None,
unsafe_recovery_wait_apply_counter: None,
}),
))
}
#[inline]
pub fn region_id(&self) -> u64 {
self.peer.region().get_id()
}
#[inline]
pub fn get_peer(&self) -> &Peer<EK, ER> {
&self.peer
}
#[inline]
pub fn peer_id(&self) -> u64 {
self.peer.peer_id()
}
#[inline]
pub fn stop(&mut self) {
self.stopped = true;
}
pub fn set_pending_merge_state(&mut self, state: MergeState) {
self.peer.pending_merge_state = Some(state);
}
pub fn schedule_applying_snapshot(&mut self) {
self.peer.mut_store().schedule_applying_snapshot();
}
pub fn reset_hibernate_state(&mut self, state: GroupState) {
self.hibernate_state.reset(state);
if state == GroupState::Idle {
self.peer.raft_group.raft.maybe_free_inflight_buffers();
}
}
pub fn maybe_hibernate(&mut self) -> bool {
self.hibernate_state
.maybe_hibernate(self.peer.peer_id(), self.peer.region())
}
pub fn update_memory_trace(&mut self, event: &mut TraceEvent) {
let task = PeerMemoryTrace {
read_only: self.raft_read_size(),
progress: self.raft_progress_size(),
proposals: self.peer.proposal_size(),
rest: self.peer.rest_size(),
};
if let Some(e) = self.trace.reset(task) {
*event = *event + e;
}
}
}
impl<E> BatchRaftCmdRequestBuilder<E>
where
E: KvEngine,
{
fn new() -> BatchRaftCmdRequestBuilder<E> {
BatchRaftCmdRequestBuilder {
batch_req_size: 0,
has_proposed_cb: false,
propose_checked: None,
request: None,
callbacks: vec![],
}
}
fn can_batch(&self, cfg: &Config, req: &RaftCmdRequest, req_size: u32) -> bool {
        // Don't batch a request whose size exceeds 20% of raft_entry_max_size,
        // so the total size of the requests in a batched raft request will not
        // exceed (40% + 20%) of raft_entry_max_size.
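        // For example, assuming the default raft_entry_max_size of 8MiB (an assumption;
        // the actual limit comes from the config): a single request larger than ~1.6MiB
        // (20%) is never batched, and `should_finish` below cuts the batch off once it
        // grows past ~3.2MiB (40%), keeping a batched entry within the limit even after
        // the header is added.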
if req.get_requests().is_empty()
|| req_size as u64 > (cfg.raft_entry_max_size.0 as f64 * 0.2) as u64
{
return false;
}
for r in req.get_requests() {
match r.get_cmd_type() {
CmdType::Delete | CmdType::Put => (),
_ => {
return false;
}
}
}
if let Some(batch_req) = self.request.as_ref() {
if batch_req.get_header() != req.get_header() {
return false;
}
}
true
}
fn add(&mut self, cmd: RaftCommand<E::Snapshot>, req_size: u32) {
let RaftCommand {
mut request,
mut callback,
..
} = cmd;
if let Some(batch_req) = self.request.as_mut() {
let requests: Vec<_> = request.take_requests().into();
for q in requests {
batch_req.mut_requests().push(q);
}
} else {
self.request = Some(request);
};
if callback.has_proposed_cb() {
self.has_proposed_cb = true;
if self.propose_checked.unwrap_or(false) {
callback.invoke_proposed();
}
}
self.callbacks.push(callback);
self.batch_req_size += req_size as u64;
}
fn should_finish(&self, cfg: &Config) -> bool {
if let Some(batch_req) = self.request.as_ref() {
            // Limit the size of the batch request so that it will not exceed
            // raft_entry_max_size after the header is added.
if self.batch_req_size > (cfg.raft_entry_max_size.0 as f64 * 0.4) as u64 {
return true;
}
if batch_req.get_requests().len() > <E as WriteBatchExt>::WRITE_BATCH_MAX_KEYS {
return true;
}
}
false
}
fn build(
&mut self,
metric: &mut RaftMetrics,
) -> Option<(RaftCmdRequest, Callback<E::Snapshot>)> {
if let Some(req) = self.request.take() {
self.batch_req_size = 0;
self.has_proposed_cb = false;
self.propose_checked = None;
if self.callbacks.len() == 1 {
let cb = self.callbacks.pop().unwrap();
return Some((req, cb));
}
metric.propose.batch += self.callbacks.len() - 1;
let mut cbs = std::mem::take(&mut self.callbacks);
let proposed_cbs: Vec<ExtCallback> = cbs
.iter_mut()
.filter_map(|cb| {
if let Callback::Write { proposed_cb, .. } = cb {
proposed_cb.take()
} else {
None
}
})
.collect();
let proposed_cb: Option<ExtCallback> = if proposed_cbs.is_empty() {
None
} else {
Some(Box::new(move || {
for proposed_cb in proposed_cbs {
proposed_cb();
}
}))
};
let committed_cbs: Vec<_> = cbs
.iter_mut()
.filter_map(|cb| {
if let Callback::Write { committed_cb, .. } = cb {
committed_cb.take()
} else {
None
}
})
.collect();
let committed_cb: Option<ExtCallback> = if committed_cbs.is_empty() {
None
} else {
Some(Box::new(move || {
for committed_cb in committed_cbs {
committed_cb();
}
}))
};
let times: SmallVec<[TiInstant; 4]> = cbs
.iter_mut()
.filter_map(|cb| {
if let Callback::Write { request_times, .. } = cb {
Some(request_times[0])
} else {
None
}
})
.collect();
let mut cb = Callback::write_ext(
Box::new(move |resp| {
for cb in cbs {
let mut cmd_resp = RaftCmdResponse::default();
cmd_resp.set_header(resp.response.get_header().clone());
cb.invoke_with_response(cmd_resp);
}
}),
proposed_cb,
committed_cb,
);
if let Callback::Write { request_times, .. } = &mut cb {
*request_times = times;
}
return Some((req, cb));
}
None
}
}
impl<EK, ER> Fsm for PeerFsm<EK, ER>
where
EK: KvEngine,
ER: RaftEngine,
{
type Message = PeerMsg<EK>;
#[inline]
fn is_stopped(&self) -> bool {
self.stopped
}
/// Set a mailbox to Fsm, which should be used to send message to itself.
#[inline]
fn set_mailbox(&mut self, mailbox: Cow<'_, BasicMailbox<Self>>)
where
Self: Sized,
{
self.mailbox = Some(mailbox.into_owned());
}
/// Take the mailbox from Fsm. Implementation should ensure there will be
/// no reference to mailbox after calling this method.
#[inline]
fn take_mailbox(&mut self) -> Option<BasicMailbox<Self>>
where
Self: Sized,
{
self.mailbox.take()
}
}
pub struct PeerFsmDelegate<'a, EK, ER, T: 'static>
where
EK: KvEngine,
ER: RaftEngine,
{
fsm: &'a mut PeerFsm<EK, ER>,
ctx: &'a mut PollContext<EK, ER, T>,
}
impl<'a, EK, ER, T: Transport> PeerFsmDelegate<'a, EK, ER, T>
where
EK: KvEngine,
ER: RaftEngine,
{
pub fn new(
fsm: &'a mut PeerFsm<EK, ER>,
ctx: &'a mut PollContext<EK, ER, T>,
) -> PeerFsmDelegate<'a, EK, ER, T> {
PeerFsmDelegate { fsm, ctx }
}
pub fn handle_msgs(&mut self, msgs: &mut Vec<PeerMsg<EK>>) {
for m in msgs.drain(..) {
match m {
PeerMsg::RaftMessage(msg) => {
if let Err(e) = self.on_raft_message(msg) {
error!(%e;
"handle raft message err";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
}
}
PeerMsg::RaftCommand(cmd) => {
self.ctx
.raft_metrics
.propose
.request_wait_time
.observe(duration_to_sec(cmd.send_time.saturating_elapsed()) as f64);
if let Some(Err(e)) = cmd.extra_opts.deadline.map(|deadline| deadline.check()) {
cmd.callback.invoke_with_response(new_error(e.into()));
continue;
}
let req_size = cmd.request.compute_size();
if self.ctx.cfg.cmd_batch
&& self.fsm.batch_req_builder.can_batch(&self.ctx.cfg, &cmd.request, req_size)
                        // Avoid merging requests with different `DiskFullOpt`s into one,
                        // so that normal writes can be rejected when proposing if the
                        // store's disk is full.
&& ((self.ctx.self_disk_usage == DiskUsage::Normal
&& !self.fsm.peer.disk_full_peers.majority())
|| cmd.extra_opts.disk_full_opt == DiskFullOpt::NotAllowedOnFull)
{
self.fsm.batch_req_builder.add(cmd, req_size);
if self.fsm.batch_req_builder.should_finish(&self.ctx.cfg) {
self.propose_batch_raft_command(true);
}
} else {
self.propose_raft_command(
cmd.request,
cmd.callback,
cmd.extra_opts.disk_full_opt,
)
}
}
PeerMsg::Tick(tick) => self.on_tick(tick),
PeerMsg::ApplyRes { res } => {
self.on_apply_res(res);
}
PeerMsg::SignificantMsg(msg) => self.on_significant_msg(msg),
PeerMsg::CasualMessage(msg) => self.on_casual_msg(msg),
PeerMsg::Start => self.start(),
PeerMsg::HeartbeatPd => {
if self.fsm.peer.is_leader() {
self.register_pd_heartbeat_tick()
}
}
PeerMsg::Noop => {}
PeerMsg::Persisted {
peer_id,
ready_number,
} => self.on_persisted_msg(peer_id, ready_number),
PeerMsg::UpdateReplicationMode => self.on_update_replication_mode(),
PeerMsg::Destroy(peer_id) => {
if self.fsm.peer.peer_id() == peer_id {
match self.fsm.peer.maybe_destroy(self.ctx) {
None => self.ctx.raft_metrics.message_dropped.applying_snap += 1,
Some(job) => {
self.handle_destroy_peer(job);
}
}
}
}
PeerMsg::UpdateRegionForUnsafeRecover(region) => {
self.on_update_region_for_unsafe_recover(region)
}
PeerMsg::UnsafeRecoveryWaitApply(counter) => {
self.on_unsafe_recovery_wait_apply(counter)
}
}
}
        // Propose the batch request, which may still be waiting for more raft commands
if self.ctx.sync_write_worker.is_some() {
self.propose_batch_raft_command(true);
} else {
self.propose_batch_raft_command(false);
self.check_batch_cmd_and_proposed_cb();
}
}
fn propose_batch_raft_command(&mut self, force: bool) {
if self.fsm.batch_req_builder.request.is_none() {
return;
}
if !force
&& self.ctx.cfg.cmd_batch_concurrent_ready_max_count != 0
&& self.fsm.peer.unpersisted_ready_len()
>= self.ctx.cfg.cmd_batch_concurrent_ready_max_count
{
return;
}
fail_point!("propose_batch_raft_command", !force, |_| {});
let (request, callback) = self
.fsm
.batch_req_builder
.build(&mut self.ctx.raft_metrics)
.unwrap();
self.propose_raft_command_internal(request, callback, DiskFullOpt::NotAllowedOnFull)
}
fn check_batch_cmd_and_proposed_cb(&mut self) {
if self.fsm.batch_req_builder.request.is_none()
|| !self.fsm.batch_req_builder.has_proposed_cb
|| self.fsm.batch_req_builder.propose_checked.is_some()
{
return;
}
let cmd = self.fsm.batch_req_builder.request.take().unwrap();
self.fsm.batch_req_builder.propose_checked = Some(false);
if let Ok(None) = self.pre_propose_raft_command(&cmd) {
if self.fsm.peer.will_likely_propose(&cmd) {
self.fsm.batch_req_builder.propose_checked = Some(true);
for cb in &mut self.fsm.batch_req_builder.callbacks {
cb.invoke_proposed();
}
}
}
self.fsm.batch_req_builder.request = Some(cmd);
}
fn on_update_replication_mode(&mut self) {
self.fsm
.peer
.switch_replication_mode(&self.ctx.global_replication_state);
if self.fsm.peer.is_leader() {
self.reset_raft_tick(GroupState::Ordered);
self.register_pd_heartbeat_tick();
}
}
fn on_update_region_for_unsafe_recover(&mut self, region: Region) {
let mut new_peer_list = HashSet::default();
for peer in region.get_peers() {
new_peer_list.insert(peer.get_id());
}
let to_be_removed: Vec<u64> = self
.region()
.get_peers()
.iter()
.filter(|&peer| !new_peer_list.contains(&peer.get_id()))
.map(|peer| peer.get_id())
.collect();
if to_be_removed.is_empty()
&& self.region().get_start_key() == region.get_start_key()
&& self.region().get_end_key() == region.get_end_key()
{
// Nothing to be updated, return directly.
return;
}
info!(
"updating the reigon for unsafe recover, original: {:?}, target: {:?}",
self.region(),
region
);
if self.fsm.peer.has_valid_leader() {
panic!("region update for unsafe recover should only occur in leaderless reigons");
}
if self.fsm.peer.raft_group.store().applied_index()
!= self.fsm.peer.raft_group.store().commit_index()
{
warn!(
"cannot proceed region update for unsafe recover, applied index is not equal to commit index"
);
return;
}
let region_state_key = keys::region_state_key(region.get_id());
let original_region_state = match self
.ctx
.engines
.kv
.get_msg_cf::<RegionLocalState>(CF_RAFT, ®ion_state_key)
{
Ok(Some(region_state)) => region_state,
Ok(None) => {
panic!("Can't find RegionLocalState while updating {:?}", region);
}
Err(e) => {
panic!(
"Fail to look up RegionLocalState while updating {:?} err {:?}",
region, e
);
}
};
let mut kv_wb = self.ctx.engines.kv.write_batch();
write_peer_state(&mut kv_wb, ®ion, PeerState::Normal, None).unwrap_or_else(|e| {
panic!(
"fails to write RegionLocalState {:?} into write brach, err {:?}",
region, e
)
});
let mut write_opts = WriteOptions::new();
write_opts.set_sync(true);
if let Err(e) = kv_wb.write_opt(&write_opts) {
panic!("fail to update RegionLocalstate {:?} err {:?}", region, e);
}
{
let mut meta = self.ctx.store_meta.lock().unwrap();
meta.set_region(
&self.ctx.coprocessor_host,
region.clone(),
&mut self.fsm.peer,
);
if meta
.region_ranges
.remove(&enc_end_key(original_region_state.get_region()))
.is_none()
{
panic!(
"{} original region does not exist in store meta",
self.fsm.peer.tag
);
}
for (_, id) in meta.region_ranges.range((
Excluded(keys::data_key(region.get_start_key())),
Unbounded::<Vec<u8>>,
)) {
let exist_region = &meta.regions[id];
if enc_start_key(exist_region) >= keys::data_end_key(region.get_end_key()) {
break;
}
panic!(
"{:?} is overlapped with an existing region {:?}",
region, exist_region
);
}
if meta
.region_ranges
.insert(enc_end_key(®ion), region.get_id())
.is_some()
{
panic!(
"key conflicts while inserting region {:?} into store meta",
region
);
}
}
for peer_id in to_be_removed.clone() {
let mut cc = eraftpb::ConfChangeV2::default();
let mut ccs = eraftpb::ConfChangeSingle::default();
ccs.set_change_type(eraftpb::ConfChangeType::RemoveNode);
ccs.set_node_id(peer_id);
cc.set_transition(eraftpb::ConfChangeTransition::Auto);
cc.mut_changes().push(ccs);
if let Err(e) = self.fsm.peer.raft_group.apply_conf_change(&cc) {
panic!("fail to apply conf change for unsafe recover {:?}", e);
}
}
self.fsm
.peer
.peer_heartbeats
.retain(|&k, _| new_peer_list.contains(&k));
self.fsm
.peer
.peers_start_pending_time
.retain(|&(k, _)| new_peer_list.contains(&k));
for peer in to_be_removed {
self.fsm.peer.remove_peer_from_cache(peer);
}
self.fsm.peer.post_split();
self.fsm.reset_hibernate_state(GroupState::Chaos);
self.register_raft_base_tick();
}
fn finish_unsafe_recovery_wait_apply(&mut self) {
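        // `fetch_sub` returns the previous value, so observing 1 here means this peer is
        // the last one still waiting; it is then responsible for sending the detailed
        // store report to PD.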
if self
.fsm
.unsafe_recovery_wait_apply_counter
.as_ref()
.unwrap()
.fetch_sub(1, Ordering::Relaxed)
== 1
{
let mut stats = StoreStats::default();
stats.set_store_id(self.store_id());
let store_info = StoreInfo {
kv_engine: self.ctx.engines.kv.clone(),
raft_engine: self.ctx.engines.raft.clone(),
capacity: self.ctx.cfg.capacity.0,
};
let task = PdTask::StoreHeartbeat {
stats,
store_info,
send_detailed_report: true,
dr_autosync_status: self
.ctx
.global_replication_state
.lock()
.unwrap()
.store_dr_autosync_status(),
};
if let Err(e) = self.ctx.pd_scheduler.schedule(task) {
panic!("fail to send detailed report to pd {:?}", e);
}
}
self.fsm.unsafe_recovery_target_commit_index = None;
self.fsm.unsafe_recovery_wait_apply_counter = None;
}
fn on_unsafe_recovery_wait_apply(&mut self, counter: Arc<AtomicUsize>) {
self.fsm.unsafe_recovery_target_commit_index =
Some(self.fsm.peer.raft_group.store().commit_index());
self.fsm.unsafe_recovery_wait_apply_counter = Some(counter);
        // If the applied index equals the commit index, there is nothing to wait for; proceed
        // to the next step immediately. If they are not equal, further checks will be
        // performed in on_apply_res().
if self.fsm.stopped
|| self.fsm.peer.raft_group.store().applied_index()
== self.fsm.peer.raft_group.store().commit_index()
{
self.finish_unsafe_recovery_wait_apply();
}
}
fn on_casual_msg(&mut self, msg: CasualMessage<EK>) {
match msg {
CasualMessage::SplitRegion {
region_epoch,
split_keys,
callback,
source,
} => {
self.on_prepare_split_region(region_epoch, split_keys, callback, &source);
}
CasualMessage::ComputeHashResult {
index,
context,
hash,
} => {
self.on_hash_computed(index, context, hash);
}
CasualMessage::RegionApproximateSize { size } => {
self.on_approximate_region_size(size);
}
CasualMessage::RegionApproximateKeys { keys } => {
self.on_approximate_region_keys(keys);
}
CasualMessage::CompactionDeclinedBytes { bytes } => {
self.on_compaction_declined_bytes(bytes);
}
CasualMessage::HalfSplitRegion {
region_epoch,
policy,
source,
} => {
self.on_schedule_half_split_region(®ion_epoch, policy, source);
}
CasualMessage::GcSnap { snaps } => {
self.on_gc_snap(snaps);
}
CasualMessage::ClearRegionSize => {
self.on_clear_region_size();
}
CasualMessage::RegionOverlapped => {
debug!("start ticking for overlapped"; "region_id" => self.region_id(), "peer_id" => self.fsm.peer_id());
                // Maybe do some safety check first?
self.fsm.reset_hibernate_state(GroupState::Chaos);
self.register_raft_base_tick();
if is_learner(&self.fsm.peer.peer) {
                    // FIXME: should use `bcast_check_stale_peer_message` instead.
                    // Sending a new enum type msg to an old tikv may cause panic during rolling update;
                    // we should change the protobuf behavior and check if it is properly handled in all places
self.fsm.peer.bcast_wake_up_message(self.ctx);
}
}
CasualMessage::SnapshotGenerated => {
                // Resume snapshot handling to avoid waiting for another heartbeat.
self.fsm.peer.ping();
self.fsm.has_ready = true;
}
CasualMessage::ForceCompactRaftLogs => {
self.on_raft_gc_log_tick(true);
}
CasualMessage::AccessPeer(cb) => cb(self.fsm as &mut dyn AbstractPeer),
CasualMessage::QueryRegionLeaderResp { region, leader } => {
                // the leader has already been updated
if self.fsm.peer.raft_group.raft.leader_id != raft::INVALID_ID
// the returned region is stale
|| util::is_epoch_stale(
region.get_region_epoch(),
self.fsm.peer.region().get_region_epoch(),
)
{
// Stale message
return;
}
// Wake up the leader if the peer is on the leader's peer list
if region
.get_peers()
.iter()
.any(|p| p.get_id() == self.fsm.peer_id())
{
self.fsm.peer.send_wake_up_message(self.ctx, &leader);
}
}
CasualMessage::RenewLease => {
self.try_renew_leader_lease();
self.reset_raft_tick(GroupState::Ordered);
}
CasualMessage::RejectRaftAppend { peer_id } => {
let mut msg = raft::eraftpb::Message::new();
msg.msg_type = MessageType::MsgUnreachable;
msg.to = peer_id;
msg.from = self.fsm.peer.peer_id();
let raft_msg = self.fsm.peer.build_raft_messages(self.ctx, vec![msg]);
self.fsm.peer.send_raft_messages(self.ctx, raft_msg);
}
}
}
fn on_tick(&mut self, tick: PeerTick) {
if self.fsm.stopped {
return;
}
trace!(
"tick";
"tick" => ?tick,
"peer_id" => self.fsm.peer_id(),
"region_id" => self.region_id(),
);
self.fsm.tick_registry[tick as usize] = false;
self.fsm.peer.adjust_cfg_if_changed(self.ctx);
match tick {
PeerTick::Raft => self.on_raft_base_tick(),
PeerTick::RaftLogGc => self.on_raft_gc_log_tick(false),
PeerTick::PdHeartbeat => self.on_pd_heartbeat_tick(),
PeerTick::SplitRegionCheck => self.on_split_region_check_tick(),
PeerTick::CheckMerge => self.on_check_merge(),
PeerTick::CheckPeerStaleState => self.on_check_peer_stale_state_tick(),
PeerTick::EntryCacheEvict => self.on_entry_cache_evict_tick(),
PeerTick::CheckLeaderLease => self.on_check_leader_lease_tick(),
}
}
fn start(&mut self) {
self.register_raft_base_tick();
self.register_raft_gc_log_tick();
self.register_pd_heartbeat_tick();
self.register_split_region_check_tick();
self.register_check_peer_stale_state_tick();
self.on_check_merge();
// Apply committed entries more quickly.
// Or if it's a leader. This implicitly means it's a singleton
// because it becomes leader in `Peer::new` when it's a
        // singleton. It has a no-op entry that needs to be persisted,
// committed, and then it should apply it.
if self.fsm.peer.raft_group.store().commit_index()
> self.fsm.peer.raft_group.store().applied_index()
|| self.fsm.peer.is_leader()
{
self.fsm.has_ready = true;
}
}
fn on_gc_snap(&mut self, snaps: Vec<(SnapKey, bool)>) {
let is_applying_snap = self.fsm.peer.is_handling_snapshot();
let s = self.fsm.peer.get_store();
let compacted_idx = s.truncated_index();
let compacted_term = s.truncated_term();
for (key, is_sending) in snaps {
if is_sending {
let s = match self.ctx.snap_mgr.get_snapshot_for_gc(&key, is_sending) {
Ok(s) => s,
Err(e) => {
error!(%e;
"failed to load snapshot";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"snapshot" => ?key,
);
continue;
}
};
if key.term < compacted_term || key.idx < compacted_idx {
info!(
"deleting compacted snap file";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"snap_file" => %key,
);
self.ctx.snap_mgr.delete_snapshot(&key, s.as_ref(), false);
} else if let Ok(meta) = s.meta() {
let modified = match meta.modified() {
Ok(m) => m,
Err(e) => {
error!(
"failed to load snapshot";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"snapshot" => ?key,
"err" => %e,
);
continue;
}
};
if let Ok(elapsed) = modified.elapsed() {
if elapsed > self.ctx.cfg.snap_gc_timeout.0 {
info!(
"deleting expired snap file";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"snap_file" => %key,
);
self.ctx.snap_mgr.delete_snapshot(&key, s.as_ref(), false);
}
}
}
} else if key.term <= compacted_term
&& (key.idx < compacted_idx
|| key.idx == compacted_idx
&& !is_applying_snap
&& !self.fsm.peer.pending_remove)
{
info!(
"deleting applied snap file";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"snap_file" => %key,
);
let a = match self.ctx.snap_mgr.get_snapshot_for_gc(&key, is_sending) {
Ok(a) => a,
Err(e) => {
error!(%e;
"failed to load snapshot";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"snap_file" => %key,
);
continue;
}
};
self.ctx.snap_mgr.delete_snapshot(&key, a.as_ref(), false);
}
}
}
fn on_clear_region_size(&mut self) {
self.fsm.peer.approximate_size = None;
self.fsm.peer.approximate_keys = None;
self.fsm.peer.has_calculated_region_size = false;
self.register_split_region_check_tick();
}
fn on_capture_change(
&mut self,
cmd: ChangeObserver,
region_epoch: RegionEpoch,
cb: Callback<EK::Snapshot>,
) {
fail_point!("raft_on_capture_change");
let region_id = self.region_id();
let msg =
new_read_index_request(region_id, region_epoch.clone(), self.fsm.peer.peer.clone());
let apply_router = self.ctx.apply_router.clone();
self.propose_raft_command_internal(
msg,
Callback::Read(Box::new(move |resp| {
// Return the error
if resp.response.get_header().has_error() {
cb.invoke_read(resp);
return;
}
apply_router.schedule_task(
region_id,
ApplyTask::Change {
cmd,
region_epoch,
cb,
},
)
})),
DiskFullOpt::NotAllowedOnFull,
);
}
fn on_significant_msg(&mut self, msg: SignificantMsg<EK::Snapshot>) {
match msg {
SignificantMsg::SnapshotStatus {
to_peer_id, status, ..
} => {
// Report snapshot status to the corresponding peer.
self.report_snapshot_status(to_peer_id, status);
}
SignificantMsg::Unreachable { to_peer_id, .. } => {
if self.fsm.peer.is_leader() {
self.fsm.peer.raft_group.report_unreachable(to_peer_id);
} else if to_peer_id == self.fsm.peer.leader_id() {
self.fsm.reset_hibernate_state(GroupState::Chaos);
self.register_raft_base_tick();
}
}
SignificantMsg::StoreUnreachable { store_id } => {
if let Some(peer_id) = util::find_peer(self.region(), store_id).map(|p| p.get_id())
{
if self.fsm.peer.is_leader() {
self.fsm.peer.raft_group.report_unreachable(peer_id);
} else if peer_id == self.fsm.peer.leader_id() {
self.fsm.reset_hibernate_state(GroupState::Chaos);
self.register_raft_base_tick();
}
}
}
SignificantMsg::MergeResult {
target_region_id,
target,
result,
} => {
self.on_merge_result(target_region_id, target, result);
}
SignificantMsg::CatchUpLogs(catch_up_logs) => {
self.on_catch_up_logs_for_merge(catch_up_logs);
}
SignificantMsg::StoreResolved { group_id, .. } => {
let state = self.ctx.global_replication_state.lock().unwrap();
if state.status().get_mode() != ReplicationMode::DrAutoSync {
return;
}
if state.status().get_dr_auto_sync().get_state() == DrAutoSyncState::Async {
return;
}
drop(state);
self.fsm
.peer
.raft_group
.raft
.assign_commit_groups(&[(self.fsm.peer_id(), group_id)]);
}
SignificantMsg::CaptureChange {
cmd,
region_epoch,
callback,
} => self.on_capture_change(cmd, region_epoch, callback),
SignificantMsg::LeaderCallback(cb) => {
self.on_leader_callback(cb);
}
SignificantMsg::RaftLogGcFlushed => {
self.on_raft_log_gc_flushed();
}
SignificantMsg::RaftlogFetched { context, res } => {
let low = res.low;
if self.fsm.peer.term() != res.term {
self.fsm.peer.mut_store().clean_async_fetch_res(low);
} else {
self.fsm
.peer
.mut_store()
.update_async_fetch_res(low, Some(res));
}
self.fsm.peer.raft_group.on_entries_fetched(context);
                // Clean up the async fetch result immediately, if it was not used, to free memory
self.fsm.peer.mut_store().update_async_fetch_res(low, None);
self.fsm.has_ready = true;
}
}
}
fn on_persisted_msg(&mut self, peer_id: u64, ready_number: u64) {
if peer_id != self.fsm.peer_id() {
error!(
"peer id not match";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"persisted_peer_id" => peer_id,
"persisted_number" => ready_number,
);
return;
}
if let Some(persist_snap_res) = self.fsm.peer.on_persist_ready(self.ctx, ready_number) {
self.on_ready_persist_snapshot(persist_snap_res);
if self.fsm.peer.pending_merge_state.is_some() {
                // After applying a snapshot, merge is rolled back implicitly.
self.on_ready_rollback_merge(0, None);
}
self.register_raft_base_tick();
}
self.fsm.has_ready = true;
if let Some(delay) = self.fsm.delayed_destroy {
if delay.reason == DelayReason::UnPersistedReady
&& !self.fsm.peer.has_unpersisted_ready()
{
self.destroy_peer(delay.merged_by_target);
}
}
}
pub fn post_raft_ready_append(&mut self) {
if let Some(persist_snap_res) = self.fsm.peer.handle_raft_ready_advance(self.ctx) {
self.on_ready_persist_snapshot(persist_snap_res);
if self.fsm.peer.pending_merge_state.is_some() {
                // After applying a snapshot, merge is rolled back implicitly.
self.on_ready_rollback_merge(0, None);
}
self.register_raft_base_tick();
}
}
fn report_snapshot_status(&mut self, to_peer_id: u64, status: SnapshotStatus) {
let to_peer = match self.fsm.peer.get_peer_from_cache(to_peer_id) {
Some(peer) => peer,
None => {
// If to_peer is gone, ignore this snapshot status
warn!(
"peer not found, ignore snapshot status";
"region_id" => self.region_id(),
"peer_id" => self.fsm.peer_id(),
"to_peer_id" => to_peer_id,
"status" => ?status,
);
return;
}
};
info!(
"report snapshot status";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"to" => ?to_peer,
"status" => ?status,
);
self.fsm.peer.raft_group.report_snapshot(to_peer_id, status)
}
fn on_leader_callback(&mut self, cb: Callback<EK::Snapshot>) {
let msg = new_read_index_request(
self.region_id(),
self.region().get_region_epoch().clone(),
self.fsm.peer.peer.clone(),
);
self.propose_raft_command_internal(msg, cb, DiskFullOpt::NotAllowedOnFull);
}
fn on_role_changed(&mut self, role: Option<StateRole>) {
// Update leader lease when the Raft state changes.
if let Some(r) = role {
if StateRole::Leader == r {
self.fsm.missing_ticks = 0;
self.register_split_region_check_tick();
self.fsm.peer.heartbeat_pd(self.ctx);
self.register_pd_heartbeat_tick();
self.register_raft_gc_log_tick();
self.register_check_leader_lease_tick();
}
}
}
/// Collect ready if any.
///
    /// Returns false if no readiness is generated.
pub fn collect_ready(&mut self) -> bool {
let has_ready = self.fsm.has_ready;
self.fsm.has_ready = false;
if !has_ready || self.fsm.stopped {
return false;
}
self.ctx.pending_count += 1;
self.ctx.has_ready = true;
let res = self.fsm.peer.handle_raft_ready_append(self.ctx);
if let Some(r) = res {
self.on_role_changed(r.state_role);
if r.has_new_entries {
self.register_raft_gc_log_tick();
self.register_entry_cache_evict_tick();
}
self.ctx.ready_count += 1;
self.ctx.raft_metrics.ready.has_ready_region += 1;
if self.fsm.peer.leader_unreachable {
self.fsm.reset_hibernate_state(GroupState::Chaos);
self.register_raft_base_tick();
self.fsm.peer.leader_unreachable = false;
}
return r.has_write_ready;
}
false
}
#[inline]
fn region_id(&self) -> u64 {
self.fsm.peer.region().get_id()
}
#[inline]
fn region(&self) -> &Region {
self.fsm.peer.region()
}
#[inline]
fn store_id(&self) -> u64 {
self.fsm.peer.peer.get_store_id()
}
#[inline]
fn schedule_tick(&mut self, tick: PeerTick) {
let idx = tick as usize;
if self.fsm.tick_registry[idx] {
return;
}
if is_zero_duration(&self.ctx.tick_batch[idx].wait_duration) {
return;
}
trace!(
"schedule tick";
"tick" => ?tick,
"timeout" => ?self.ctx.tick_batch[idx].wait_duration,
"region_id" => self.region_id(),
"peer_id" => self.fsm.peer_id(),
);
self.fsm.tick_registry[idx] = true;
let region_id = self.region_id();
let mb = match self.ctx.router.mailbox(region_id) {
Some(mb) => mb,
None => {
self.fsm.tick_registry[idx] = false;
error!(
"failed to get mailbox";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"tick" => ?tick,
);
return;
}
};
let peer_id = self.fsm.peer.peer_id();
let cb = Box::new(move || {
// This can happen only when the peer is about to be destroyed
            // or the node is shutting down. So it's OK not to clean up the
            // registry.
if let Err(e) = mb.force_send(PeerMsg::Tick(tick)) {
debug!(
"failed to schedule peer tick";
"region_id" => region_id,
"peer_id" => peer_id,
"tick" => ?tick,
"err" => %e,
);
}
});
self.ctx.tick_batch[idx].ticks.push(cb);
}
fn register_raft_base_tick(&mut self) {
        // If registering the raft base tick fails, the whole raft group can't run correctly.
        // TODO: shut down the store?
self.schedule_tick(PeerTick::Raft)
}
fn on_raft_base_tick(&mut self) {
if self.fsm.peer.pending_remove {
self.fsm.peer.mut_store().flush_cache_metrics();
return;
}
        // When there is a pending snapshot and the election timeout is met, the peer can't
        // pass the pending conf change check because the first index has been updated to
        // a value that is larger than the last index.
if self.fsm.peer.is_handling_snapshot() || self.fsm.peer.has_pending_snapshot() {
// need to check if snapshot is applied.
self.fsm.has_ready = true;
self.fsm.missing_ticks = 0;
self.register_raft_base_tick();
return;
}
self.fsm.peer.retry_pending_reads(&self.ctx.cfg);
let mut res = None;
if self.ctx.cfg.hibernate_regions {
if self.fsm.hibernate_state.group_state() == GroupState::Idle {
                // missing_ticks should be less than the election timeout ticks, otherwise
                // a follower may tick more than an election timeout in chaos state.
                // Before the tick stops, `missing_tick` should be `raft_election_timeout_ticks` - 2
                // - `raft_heartbeat_ticks` (default 10 - 2 - 2 = 6),
                // and the follower's `election_elapsed` in raft-rs is 1.
                // After the group state becomes Chaos, the next tick will call `raft_group.tick`
                // `missing_tick` + 1 times (default 7).
                // Then the follower's `election_elapsed` will be 1 + `missing_tick` + 1
                // (default 1 + 6 + 1 = 8), which is less than the min election timeout.
                // The reason is that we don't want to let all followers become (pre)candidates
                // if one follower receives a request, becomes a (pre)candidate and sends
                // (pre)vote msgs to others. As long as the leader can wake up and broadcast
                // heartbeats within one `raft_heartbeat_ticks` interval (default 2s), no other
                // follower will wake up and send vote msgs again.
if self.fsm.missing_ticks + 2 + self.ctx.cfg.raft_heartbeat_ticks
< self.ctx.cfg.raft_election_timeout_ticks
{
self.register_raft_base_tick();
self.fsm.missing_ticks += 1;
}
return;
}
res = Some(self.fsm.peer.check_before_tick(&self.ctx.cfg));
if self.fsm.missing_ticks > 0 {
for _ in 0..self.fsm.missing_ticks {
if self.fsm.peer.raft_group.tick() {
self.fsm.has_ready = true;
}
}
self.fsm.missing_ticks = 0;
}
}
if self.fsm.peer.raft_group.tick() {
self.fsm.has_ready = true;
}
self.fsm.peer.mut_store().flush_cache_metrics();
// Keep ticking if there are still pending read requests or this node is within hibernate timeout.
        if res.is_none() /* hibernate_regions is false */ ||
!self.fsm.peer.check_after_tick(self.fsm.hibernate_state.group_state(), res.unwrap()) ||
(self.fsm.peer.is_leader() && !self.all_agree_to_hibernate())
{
self.register_raft_base_tick();
// We need pd heartbeat tick to collect down peers and pending peers.
self.register_pd_heartbeat_tick();
return;
}
// Keep ticking if there are disk full peers for the Region.
if !self.fsm.peer.disk_full_peers.is_empty() {
self.register_raft_base_tick();
return;
}
debug!("stop ticking"; "region_id" => self.region_id(), "peer_id" => self.fsm.peer_id(), "res" => ?res);
self.fsm.reset_hibernate_state(GroupState::Idle);
        // Followers stop ticking in the GroupState::Idle branch above once their
        // missing-tick budget is used up. Keep ticking for followers here to allow
        // them to campaign quickly when an abnormal situation is detected.
if !self.fsm.peer.is_leader() {
self.register_raft_base_tick();
} else {
self.register_pd_heartbeat_tick();
}
}
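    /// Handles results reported by the apply worker: either the outcome of
    /// applying committed entries or the confirmation that this peer has been
    /// destroyed.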
fn on_apply_res(&mut self, res: ApplyTaskRes<EK::Snapshot>) {
fail_point!("on_apply_res", |_| {});
match res {
ApplyTaskRes::Apply(mut res) => {
debug!(
"async apply finish";
"region_id" => self.region_id(),
"peer_id" => self.fsm.peer_id(),
"res" => ?res,
);
self.on_ready_result(&mut res.exec_res, &res.metrics);
if self.fsm.stopped {
return;
}
let applied_index = res.apply_state.applied_index;
self.fsm.has_ready |= self.fsm.peer.post_apply(
self.ctx,
res.apply_state,
res.applied_index_term,
&res.metrics,
);
                // After applying, several metrics are updated; report them to pd to
                // get fair scheduling.
if self.fsm.peer.is_leader() {
self.register_pd_heartbeat_tick();
self.register_split_region_check_tick();
self.retry_pending_prepare_merge(applied_index);
}
}
ApplyTaskRes::Destroy {
region_id,
peer_id,
merge_from_snapshot,
} => {
assert_eq!(peer_id, self.fsm.peer.peer_id());
if !merge_from_snapshot {
self.destroy_peer(false);
} else {
// Wait for its target peer to apply snapshot and then send `MergeResult` back
// to destroy itself
let mut meta = self.ctx.store_meta.lock().unwrap();
// The `need_atomic` flag must be true
assert!(*meta.destroyed_region_for_snap.get(®ion_id).unwrap());
let target_region_id = *meta.targets_map.get(®ion_id).unwrap();
let is_ready = meta
.atomic_snap_regions
.get_mut(&target_region_id)
.unwrap()
.get_mut(®ion_id)
.unwrap();
*is_ready = true;
}
}
}
// After a log has been applied, check if we need to trigger the unsafe recovery reporting procedure.
if let Some(target_commit_index) = self.fsm.unsafe_recovery_target_commit_index {
if self.fsm.peer.raft_group.store().applied_index() >= target_commit_index {
self.finish_unsafe_recovery_wait_apply();
}
}
}
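    /// Once the applied index reaches the prepare-merge fence, re-proposes the
    /// deferred PrepareMerge command and clears the fence.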
fn retry_pending_prepare_merge(&mut self, applied_index: u64) {
if self.fsm.peer.prepare_merge_fence > 0
&& applied_index >= self.fsm.peer.prepare_merge_fence
{
if let Some(pending_prepare_merge) = self.fsm.peer.pending_prepare_merge.take() {
self.propose_raft_command_internal(
pending_prepare_merge,
Callback::None,
DiskFullOpt::AllowedOnAlmostFull,
);
}
// When applied index reaches prepare_merge_fence, always clear the fence.
// So, even if the PrepareMerge fails to propose, we can ensure the region
// will be able to serve again.
self.fsm.peer.prepare_merge_fence = 0;
assert!(self.fsm.peer.pending_prepare_merge.is_none());
}
}
    // If the lease has expired, we send a no-op read index to renew it.
fn try_renew_leader_lease(&mut self) {
if !self.fsm.peer.is_leader() {
return;
}
if let Err(e) = self.fsm.peer.pre_read_index() {
debug!(
"prevent unsafe read index to renew leader lease";
"region_id" => self.region_id(),
"peer_id" => self.fsm.peer_id(),
"err" => ?e,
);
self.ctx.raft_metrics.propose.unsafe_read_index += 1;
return;
}
let current_time = *self.ctx.current_time.get_or_insert_with(monotonic_raw_now);
if self.fsm.peer.need_renew_lease_at(self.ctx, current_time) {
let mut cmd = new_read_index_request(
self.region_id(),
self.region().get_region_epoch().clone(),
self.fsm.peer.peer.clone(),
);
cmd.mut_header().set_read_quorum(true);
self.propose_raft_command_internal(
cmd,
Callback::Read(Box::new(|_| ())),
DiskFullOpt::AllowedOnAlmostFull,
);
}
}
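    /// Records the disk usage carried by a raft message. On the leader,
    /// `disk_full_peers` is refilled whenever a peer's reported usage changes
    /// (or a region merge proposal is pending).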
fn handle_reported_disk_usage(&mut self, msg: &RaftMessage) {
let store_id = msg.get_from_peer().get_store_id();
let peer_id = msg.get_from_peer().get_id();
let refill_disk_usages = if matches!(msg.disk_usage, DiskUsage::Normal) {
self.ctx.store_disk_usages.remove(&store_id);
if !self.fsm.peer.is_leader() {
return;
}
self.fsm.peer.disk_full_peers.has(peer_id)
} else {
self.ctx.store_disk_usages.insert(store_id, msg.disk_usage);
if !self.fsm.peer.is_leader() {
return;
}
let disk_full_peers = &self.fsm.peer.disk_full_peers;
disk_full_peers.is_empty()
|| disk_full_peers
.get(peer_id)
.map_or(true, |x| x != msg.disk_usage)
};
if refill_disk_usages || self.fsm.peer.has_region_merge_proposal {
let prev = self.fsm.peer.disk_full_peers.get(peer_id);
if Some(msg.disk_usage) != prev {
info!(
"reported disk usage changes {:?} -> {:?}", prev, msg.disk_usage;
"region_id" => self.fsm.region_id(),
"peer_id" => peer_id,
);
}
self.fsm.peer.refill_disk_full_peers(self.ctx);
debug!(
"raft message refills disk full peers to {:?}",
self.fsm.peer.disk_full_peers;
"region_id" => self.fsm.region_id(),
);
}
}
fn on_raft_message(&mut self, msg: InspectedRaftMessage) -> Result<()> {
let InspectedRaftMessage { heap_size, mut msg } = msg;
let peer_disk_usage = msg.disk_usage;
let stepped = Cell::new(false);
let memtrace_raft_entries = &mut self.fsm.peer.memtrace_raft_entries as *mut usize;
defer!({
MEMTRACE_RAFT_MESSAGES.trace(TraceEvent::Sub(heap_size));
if stepped.get() {
unsafe {
                // It could be less than exact due to entry overwriting.
*memtrace_raft_entries += heap_size;
MEMTRACE_RAFT_ENTRIES.trace(TraceEvent::Add(heap_size));
}
}
});
debug!(
"handle raft message";
"region_id" => self.region_id(),
"peer_id" => self.fsm.peer_id(),
"message_type" => %util::MsgType(&msg),
"from_peer_id" => msg.get_from_peer().get_id(),
"to_peer_id" => msg.get_to_peer().get_id(),
);
if self.fsm.peer.pending_remove || self.fsm.stopped {
return Ok(());
}
self.handle_reported_disk_usage(&msg);
let msg_type = msg.get_message().get_msg_type();
if matches!(self.ctx.self_disk_usage, DiskUsage::AlreadyFull)
&& MessageType::MsgTimeoutNow == msg_type
{
debug!(
"skip {:?} because of disk full", msg_type;
"region_id" => self.region_id(), "peer_id" => self.fsm.peer_id()
);
self.ctx.raft_metrics.message_dropped.disk_full += 1;
return Ok(());
}
if !self.validate_raft_msg(&msg) {
return Ok(());
}
if msg.get_is_tombstone() {
            // We received a message telling us to remove ourselves.
self.handle_gc_peer_msg(&msg);
return Ok(());
}
if msg.has_merge_target() {
fail_point!("on_has_merge_target", |_| Ok(()));
if self.need_gc_merge(&msg)? {
self.on_stale_merge(msg.get_merge_target().get_id());
}
return Ok(());
}
if self.check_msg(&msg) {
return Ok(());
}
if msg.has_extra_msg() {
self.on_extra_message(msg);
return Ok(());
}
let is_snapshot = msg.get_message().has_snapshot();
// TODO: spin off the I/O code (delete_snapshot)
let regions_to_destroy = match self.check_snapshot(&msg)? {
Either::Left(key) => {
// If the snapshot file is not used again, then it's OK to
                // delete it here. If the snapshot file will be reused when
// receiving, then it will fail to pass the check again, so
// missing snapshot files should not be noticed.
let s = self.ctx.snap_mgr.get_snapshot_for_applying(&key)?;
self.ctx.snap_mgr.delete_snapshot(&key, s.as_ref(), false);
return Ok(());
}
Either::Right(v) => v,
};
if util::is_vote_msg(msg.get_message())
|| msg.get_message().get_msg_type() == MessageType::MsgTimeoutNow
{
if self.fsm.hibernate_state.group_state() != GroupState::Chaos {
self.fsm.reset_hibernate_state(GroupState::Chaos);
self.register_raft_base_tick();
}
} else if msg.get_from_peer().get_id() == self.fsm.peer.leader_id() {
self.reset_raft_tick(GroupState::Ordered);
}
let from_peer_id = msg.get_from_peer().get_id();
self.fsm.peer.insert_peer_cache(msg.take_from_peer());
let result = if msg.get_message().get_msg_type() == MessageType::MsgTransferLeader {
self.on_transfer_leader_msg(msg.get_message(), peer_disk_usage);
Ok(())
} else {
self.fsm.peer.step(self.ctx, msg.take_message())
};
stepped.set(result.is_ok());
if is_snapshot {
if !self.fsm.peer.has_pending_snapshot() {
// This snapshot is rejected by raft-rs.
let mut meta = self.ctx.store_meta.lock().unwrap();
meta.pending_snapshot_regions
.retain(|r| self.fsm.region_id() != r.get_id());
} else {
// This snapshot may be accepted by raft-rs.
// If it's rejected by raft-rs, the snapshot region in `pending_snapshot_regions`
// will be removed together with the latest snapshot region after applying that snapshot.
// But if `regions_to_destroy` is not empty, the pending snapshot must be this msg's snapshot
// because this kind of snapshot is exclusive.
self.destroy_regions_for_snapshot(regions_to_destroy);
}
}
result?;
if self.fsm.peer.any_new_peer_catch_up(from_peer_id) {
self.fsm.peer.heartbeat_pd(self.ctx);
self.fsm.peer.should_wake_up = true;
}
if self.fsm.peer.should_wake_up {
self.reset_raft_tick(GroupState::Ordered);
}
self.fsm.has_ready = true;
Ok(())
}
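    /// Returns true once every peer has voted to hibernate. Otherwise, when
    /// broadcasting is allowed, sends `MsgHibernateRequest` to all other peers
    /// and returns false.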
fn all_agree_to_hibernate(&mut self) -> bool {
if self.fsm.maybe_hibernate() {
return true;
}
if !self
.fsm
.hibernate_state
.should_bcast(&self.ctx.feature_gate)
{
return false;
}
for peer in self.fsm.peer.region().get_peers() {
if peer.get_id() == self.fsm.peer.peer_id() {
continue;
}
let mut extra = ExtraMessage::default();
extra.set_type(ExtraMessageType::MsgHibernateRequest);
self.fsm
.peer
.send_extra_message(extra, &mut self.ctx.trans, peer);
}
false
}
fn on_hibernate_request(&mut self, from: &metapb::Peer) {
if !self.ctx.cfg.hibernate_regions
|| self.fsm.peer.has_uncommitted_log()
|| from.get_id() != self.fsm.peer.leader_id()
{
            // Ignoring the message means rejecting it implicitly.
return;
}
let mut extra = ExtraMessage::default();
extra.set_type(ExtraMessageType::MsgHibernateResponse);
self.fsm
.peer
.send_extra_message(extra, &mut self.ctx.trans, from);
}
fn on_hibernate_response(&mut self, from: &metapb::Peer) {
if !self.fsm.peer.is_leader() {
return;
}
if self
.fsm
.peer
.region()
.get_peers()
.iter()
.all(|p| p.get_id() != from.get_id())
{
return;
}
self.fsm.hibernate_state.count_vote(from.get_id());
}
fn on_extra_message(&mut self, mut msg: RaftMessage) {
match msg.get_extra_msg().get_type() {
ExtraMessageType::MsgRegionWakeUp | ExtraMessageType::MsgCheckStalePeer => {
if self.fsm.hibernate_state.group_state() == GroupState::Idle {
self.reset_raft_tick(GroupState::Ordered);
}
if msg.get_extra_msg().get_type() == ExtraMessageType::MsgRegionWakeUp
&& self.fsm.peer.is_leader()
{
self.fsm.peer.raft_group.raft.ping();
}
}
ExtraMessageType::MsgWantRollbackMerge => {
self.fsm.peer.maybe_add_want_rollback_merge_peer(
msg.get_from_peer().get_id(),
msg.get_extra_msg(),
);
}
ExtraMessageType::MsgCheckStalePeerResponse => {
self.fsm.peer.on_check_stale_peer_response(
msg.get_region_epoch().get_conf_ver(),
msg.mut_extra_msg().take_check_peers().into(),
);
}
ExtraMessageType::MsgHibernateRequest => {
self.on_hibernate_request(msg.get_from_peer());
}
ExtraMessageType::MsgHibernateResponse => {
self.on_hibernate_response(msg.get_from_peer());
}
}
}
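    /// Resets the hibernate state to `state` and restarts the raft base tick,
    /// plus the leader lease check tick if this peer is the leader.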
fn reset_raft_tick(&mut self, state: GroupState) {
self.fsm.reset_hibernate_state(state);
self.fsm.missing_ticks = 0;
self.fsm.peer.should_wake_up = false;
self.register_raft_base_tick();
if self.fsm.peer.is_leader() {
self.register_check_leader_lease_tick();
}
}
    // Returning false means the message is invalid and can be ignored.
fn validate_raft_msg(&mut self, msg: &RaftMessage) -> bool {
let region_id = msg.get_region_id();
let to = msg.get_to_peer();
if to.get_store_id() != self.store_id() {
warn!(
"store not match, ignore it";
"region_id" => region_id,
"to_store_id" => to.get_store_id(),
"my_store_id" => self.store_id(),
);
self.ctx.raft_metrics.message_dropped.mismatch_store_id += 1;
return false;
}
if !msg.has_region_epoch() {
error!(
"missing epoch in raft message, ignore it";
"region_id" => region_id,
);
self.ctx.raft_metrics.message_dropped.mismatch_region_epoch += 1;
return false;
}
true
}
/// Checks if the message is sent to the correct peer.
///
    /// Returning true means the message can be dropped silently.
fn check_msg(&mut self, msg: &RaftMessage) -> bool {
let from_epoch = msg.get_region_epoch();
let from_store_id = msg.get_from_peer().get_store_id();
        // Let's consider the following cases with three nodes [1, 2, 3] where 1 is the leader:
// a. 1 removes 2, 2 may still send MsgAppendResponse to 1.
// We should ignore this stale message and let 2 remove itself after
// applying the ConfChange log.
// b. 2 is isolated, 1 removes 2. When 2 rejoins the cluster, 2 will
// send stale MsgRequestVote to 1 and 3, at this time, we should tell 2 to gc itself.
// c. 2 is isolated but can communicate with 3. 1 removes 3.
// 2 will send stale MsgRequestVote to 3, 3 should ignore this message.
// d. 2 is isolated but can communicate with 3. 1 removes 2, then adds 4, remove 3.
// 2 will send stale MsgRequestVote to 3, 3 should tell 2 to gc itself.
// e. 2 is isolated. 1 adds 4, 5, 6, removes 3, 1. Now assume 4 is leader.
// After 2 rejoins the cluster, 2 may send stale MsgRequestVote to 1 and 3,
// 1 and 3 will ignore this message. Later 4 will send messages to 2 and 2 will
// rejoin the raft group again.
// f. 2 is isolated. 1 adds 4, 5, 6, removes 3, 1. Now assume 4 is leader, and 4 removes 2.
// unlike case e, 2 will be stale forever.
        // TODO: for case f, if 2 is stale for a long time, 2 will communicate with pd and pd will
        // tell 2 that it is stale, so 2 can remove itself.
let self_epoch = self.fsm.peer.region().get_region_epoch();
if util::is_epoch_stale(from_epoch, self_epoch)
&& util::find_peer(self.fsm.peer.region(), from_store_id).is_none()
{
self.ctx.handle_stale_msg(msg, self_epoch.clone(), None);
return true;
}
let target = msg.get_to_peer();
match target.get_id().cmp(&self.fsm.peer.peer_id()) {
cmp::Ordering::Less => {
info!(
"target peer id is smaller, msg maybe stale";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"target_peer" => ?target,
);
self.ctx.raft_metrics.message_dropped.stale_msg += 1;
true
}
cmp::Ordering::Greater => {
match self.fsm.peer.maybe_destroy(self.ctx) {
Some(job) => {
info!(
"target peer id is larger, destroying self";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"target_peer" => ?target,
);
if self.handle_destroy_peer(job) {
                        // It's not frequent, so using 0 as `heap_size` is ok.
let store_msg = StoreMsg::RaftMessage(InspectedRaftMessage {
heap_size: 0,
msg: msg.clone(),
});
if let Err(e) = self.ctx.router.send_control(store_msg) {
info!(
"failed to send back store message, are we shutting down?";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"err" => %e,
);
}
}
}
None => self.ctx.raft_metrics.message_dropped.applying_snap += 1,
}
true
}
cmp::Ordering::Equal => false,
}
}
/// Check if it's necessary to gc the source merge peer.
///
/// If the target merge peer won't be created on this store,
/// then it's appropriate to destroy it immediately.
fn need_gc_merge(&mut self, msg: &RaftMessage) -> Result<bool> {
let merge_target = msg.get_merge_target();
let target_region_id = merge_target.get_id();
debug!(
"receive merge target";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"merge_target" => ?merge_target,
);
        // Receiving a message that has a merge target indicates that the source peer on this
        // store is stale and the peers on other stores have already been merged. The epoch in
        // the merge target is the state of the target peer at the time the source peer was
        // merged. So here we record the merge target epoch version to let the target peer on
        // this store decide whether to destroy the source peer.
let mut meta = self.ctx.store_meta.lock().unwrap();
meta.targets_map.insert(self.region_id(), target_region_id);
let v = meta
.pending_merge_targets
.entry(target_region_id)
.or_default();
let mut no_range_merge_target = merge_target.clone();
no_range_merge_target.clear_start_key();
no_range_merge_target.clear_end_key();
if let Some(pre_merge_target) = v.insert(self.region_id(), no_range_merge_target) {
            // The merge target epoch records the version of the target region when the source
            // region was merged, so it must be the same no matter when the merge target is received.
if pre_merge_target.get_region_epoch().get_version()
!= merge_target.get_region_epoch().get_version()
{
panic!(
"conflict merge target epoch version {:?} {:?}",
pre_merge_target.get_region_epoch().get_version(),
merge_target.get_region_epoch()
);
}
}
if let Some(r) = meta.regions.get(&target_region_id) {
// In the case that the source peer's range isn't overlapped with target's anymore:
// | region 2 | region 3 | region 1 |
// || merge 3 into 2
// \/
// | region 2 | region 1 |
// || merge 1 into 2
// \/
// | region 2 |
// || split 2 into 4
// \/
// | region 4 |region 2|
// so the new target peer can't find the source peer.
// e.g. new region 2 is overlapped with region 1
//
            // In that case, the source peer still needs to decide whether to destroy itself.
            // When the target peer has already moved on, the source peer can destroy itself.
if util::is_epoch_stale(merge_target.get_region_epoch(), r.get_region_epoch()) {
return Ok(true);
}
return Ok(false);
}
drop(meta);
        // All of the target peers must exist before merging, which is guaranteed by PD.
        // Now the target peer is not in the region map, so if everything is ok, the merge
        // target region should be staler than the local target region.
if self.is_merge_target_region_stale(merge_target)? {
Ok(true)
} else {
if self.ctx.cfg.dev_assert {
panic!(
"something is wrong, maybe PD do not ensure all target peers exist before merging"
);
}
error!(
"something is wrong, maybe PD do not ensure all target peers exist before merging"
);
Ok(false)
}
}
fn handle_gc_peer_msg(&mut self, msg: &RaftMessage) {
let from_epoch = msg.get_region_epoch();
if !util::is_epoch_stale(self.fsm.peer.region().get_region_epoch(), from_epoch) {
return;
}
if self.fsm.peer.peer != *msg.get_to_peer() {
info!(
"receive stale gc message, ignore.";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
self.ctx.raft_metrics.message_dropped.stale_msg += 1;
return;
}
// TODO: ask pd to guarantee we are stale now.
info!(
"receives gc message, trying to remove";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"to_peer" => ?msg.get_to_peer(),
);
        // Destroy the peer in the next round in order to apply more committed entries, if any.
        // This relies on the implementation detail that msgs handled in this round have already been fetched.
let _ = self
.ctx
.router
.force_send(self.fsm.region_id(), PeerMsg::Destroy(self.fsm.peer_id()));
}
    // Returns `Vec<(u64, bool)>` indicating (source_region_id, merge_to_this_peer) if the `msg`
    // doesn't contain a snapshot or the snapshot doesn't conflict with any other snapshots or regions.
// Otherwise a `SnapKey` is returned.
fn check_snapshot(&mut self, msg: &RaftMessage) -> Result<Either<SnapKey, Vec<(u64, bool)>>> {
if !msg.get_message().has_snapshot() {
return Ok(Either::Right(vec![]));
}
let region_id = msg.get_region_id();
let snap = msg.get_message().get_snapshot();
let key = SnapKey::from_region_snap(region_id, snap);
let mut snap_data = RaftSnapshotData::default();
snap_data.merge_from_bytes(snap.get_data())?;
let snap_region = snap_data.take_region();
let peer_id = msg.get_to_peer().get_id();
let snap_enc_start_key = enc_start_key(&snap_region);
let snap_enc_end_key = enc_end_key(&snap_region);
let before_check_snapshot_1_2_fp = || -> bool {
fail_point!(
"before_check_snapshot_1_2",
self.fsm.region_id() == 1 && self.store_id() == 2,
|_| true
);
false
};
let before_check_snapshot_1000_2_fp = || -> bool {
fail_point!(
"before_check_snapshot_1000_2",
self.fsm.region_id() == 1000 && self.store_id() == 2,
|_| true
);
false
};
if before_check_snapshot_1_2_fp() || before_check_snapshot_1000_2_fp() {
return Ok(Either::Left(key));
}
if snap_region
.get_peers()
.iter()
.all(|p| p.get_id() != peer_id)
{
info!(
"snapshot doesn't contain to peer, skip";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"snap" => ?snap_region,
"to_peer" => ?msg.get_to_peer(),
);
self.ctx.raft_metrics.message_dropped.region_no_peer += 1;
return Ok(Either::Left(key));
}
let mut meta = self.ctx.store_meta.lock().unwrap();
if meta.regions[&self.region_id()] != *self.region() {
if !self.fsm.peer.is_initialized() {
info!(
"stale delegate detected, skip";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
self.ctx.raft_metrics.message_dropped.stale_msg += 1;
return Ok(Either::Left(key));
} else {
panic!(
"{} meta corrupted: {:?} != {:?}",
self.fsm.peer.tag,
meta.regions[&self.region_id()],
self.region()
);
}
}
if meta.atomic_snap_regions.contains_key(®ion_id) {
info!(
"atomic snapshot is applying, skip";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
return Ok(Either::Left(key));
}
for region in &meta.pending_snapshot_regions {
if enc_start_key(region) < snap_enc_end_key &&
enc_end_key(region) > snap_enc_start_key &&
                // The same region can overlap; we will apply the latest version of the snapshot.
region.get_id() != snap_region.get_id()
{
info!(
"pending region overlapped";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"region" => ?region,
"snap" => ?snap_region,
);
self.ctx.raft_metrics.message_dropped.region_overlap += 1;
return Ok(Either::Left(key));
}
}
let mut is_overlapped = false;
let mut regions_to_destroy = vec![];
        // In some extreme cases, the source peer may be destroyed improperly so that a later
        // CommitMerge may panic because the source is already destroyed; so just drop the message:
        // 1. A new snapshot is received while another snapshot is still being applied, and the
        // snapshot being applied was generated before the merge while the new one was generated
        // after it. After the applying snapshot is finished, the log may be able to catch up,
        // and so a CommitMerge will be applied.
        // 2. There is a CommitMerge pending in the apply thread.
let ready = !self.fsm.peer.is_handling_snapshot()
&& !self.fsm.peer.has_pending_snapshot()
            // It must be ensured that all logs have been applied.
            // Suppose the apply fsm is applying a `CommitMerge` log and this snapshot was
            // generated after the merge; its corresponding source peer cannot be destroyed
            // by this snapshot.
&& self.fsm.peer.ready_to_handle_pending_snap();
for exist_region in meta
.region_ranges
.range((Excluded(snap_enc_start_key), Unbounded::<Vec<u8>>))
.map(|(_, ®ion_id)| &meta.regions[®ion_id])
.take_while(|r| enc_start_key(r) < snap_enc_end_key)
.filter(|r| r.get_id() != region_id)
{
info!(
"region overlapped";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"exist" => ?exist_region,
"snap" => ?snap_region,
);
let (can_destroy, merge_to_this_peer) = maybe_destroy_source(
&meta,
self.fsm.region_id(),
self.fsm.peer_id(),
exist_region.get_id(),
snap_region.get_region_epoch().to_owned(),
);
if ready && can_destroy {
                // The snapshot on which we base the decision to destroy a peer must actually
                // be appliable. So don't destroy the peer immediately here, or the snapshot
                // may be dropped in a later check while the peer has already been destroyed.
regions_to_destroy.push((exist_region.get_id(), merge_to_this_peer));
continue;
}
is_overlapped = true;
if !can_destroy
&& snap_region.get_region_epoch().get_version()
> exist_region.get_region_epoch().get_version()
{
                // If the snapshot's epoch version is greater than the existing region's, the
                // existing region may have been merged or split already.
let _ = self.ctx.router.force_send(
exist_region.get_id(),
PeerMsg::CasualMessage(CasualMessage::RegionOverlapped),
);
}
}
if is_overlapped {
self.ctx.raft_metrics.message_dropped.region_overlap += 1;
return Ok(Either::Left(key));
}
// Check if snapshot file exists.
self.ctx.snap_mgr.get_snapshot_for_applying(&key)?;
// WARNING: The checking code must be above this line.
// Now all checking passed.
if self.fsm.peer.local_first_replicate && !self.fsm.peer.is_initialized() {
            // If the peer is not initialized and passes the snapshot range check, the
            // `is_splitting` flag must be false.
            // 1. If `is_splitting` were true, the uninitialized peer would have been created
            //    before the split was applied and its peer id would be the same as the split
            //    one's, so there should be no initialized peer before.
            // 2. If the peer is also created by splitting, then the snapshot range does not
            //    overlap with the parent peer's. It means the leader has applied merge and
            //    split at least once. However, the prerequisite of merge includes the
            //    initialization of all target peers and source peers, which conflicts with 1.
let pending_create_peers = self.ctx.pending_create_peers.lock().unwrap();
let status = pending_create_peers.get(®ion_id).cloned();
if status != Some((self.fsm.peer_id(), false)) {
drop(pending_create_peers);
panic!("{} status {:?} is not expected", self.fsm.peer.tag, status);
}
}
meta.pending_snapshot_regions.push(snap_region);
Ok(Either::Right(regions_to_destroy))
}
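    /// Destroys the source regions that conflict with an accepted snapshot:
    /// each source is recorded in `atomic_snap_regions` /
    /// `destroyed_region_for_snap` and then told to destroy itself via a
    /// `MergeResult` message.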
fn destroy_regions_for_snapshot(&mut self, regions_to_destroy: Vec<(u64, bool)>) {
if regions_to_destroy.is_empty() {
return;
}
let mut meta = self.ctx.store_meta.lock().unwrap();
assert!(!meta.atomic_snap_regions.contains_key(&self.fsm.region_id()));
for (source_region_id, merge_to_this_peer) in regions_to_destroy {
if !meta.regions.contains_key(&source_region_id) {
if merge_to_this_peer {
drop(meta);
panic!(
"{}'s source region {} has been destroyed",
self.fsm.peer.tag, source_region_id
);
}
continue;
}
info!(
"source region destroy due to target region's snapshot";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"source_region_id" => source_region_id,
"need_atomic" => merge_to_this_peer,
);
meta.atomic_snap_regions
.entry(self.fsm.region_id())
.or_default()
.insert(source_region_id, false);
meta.destroyed_region_for_snap
.insert(source_region_id, merge_to_this_peer);
let result = if merge_to_this_peer {
MergeResultKind::FromTargetSnapshotStep1
} else {
MergeResultKind::Stale
};
            // Using `unwrap` is OK because the StoreMeta lock is held and these source peers
            // still exist in the regions and region_ranges maps.
// It depends on the implementation of `destroy_peer`.
self.ctx
.router
.force_send(
source_region_id,
PeerMsg::SignificantMsg(SignificantMsg::MergeResult {
target_region_id: self.fsm.region_id(),
target: self.fsm.peer.peer.clone(),
result,
}),
)
.unwrap();
}
}
fn on_transfer_leader_msg(&mut self, msg: &eraftpb::Message, peer_disk_usage: DiskUsage) {
        // log_term is set by the original leader and represents the term the last log was
        // written in, which should be equal to the original leader's term.
if msg.get_log_term() != self.fsm.peer.term() {
return;
}
if self.fsm.peer.is_leader() {
let from = match self.fsm.peer.get_peer_from_cache(msg.get_from()) {
Some(p) => p,
None => return,
};
match self
.fsm
.peer
.ready_to_transfer_leader(self.ctx, msg.get_index(), &from)
{
Some(reason) => {
info!(
"reject to transfer leader";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"to" => ?from,
"reason" => reason,
"index" => msg.get_index(),
"last_index" => self.fsm.peer.get_store().last_index(),
);
}
None => {
if self.fsm.batch_req_builder.request.is_some() {
self.propose_batch_raft_command(true);
}
                    // If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message
                    // is a reply to an earlier transfer leader command. Then we can initiate
                    // the leader transfer.
if msg.get_context() != TRANSFER_LEADER_COMMAND_REPLY_CTX
&& self.propose_locks_before_transfer_leader()
{
                        // If some pessimistic locks have just been proposed, we propose another
                        // TransferLeader command instead of transferring the leader immediately.
let mut cmd = new_admin_request(
self.fsm.peer.region().get_id(),
self.fsm.peer.peer.clone(),
);
cmd.mut_header()
.set_region_epoch(self.region().get_region_epoch().clone());
// Set this flag to propose this command like a normal proposal.
cmd.mut_header()
.set_flags(WriteBatchFlags::TRANSFER_LEADER_PROPOSAL.bits());
cmd.mut_admin_request()
.set_cmd_type(AdminCmdType::TransferLeader);
cmd.mut_admin_request().mut_transfer_leader().set_peer(from);
self.propose_raft_command(
cmd,
Callback::None,
DiskFullOpt::AllowedOnAlmostFull,
);
} else {
self.fsm.peer.transfer_leader(&from);
}
}
}
} else {
self.fsm
.peer
.execute_transfer_leader(self.ctx, msg.get_from(), peer_disk_usage, false);
}
}
    // Returns whether we should propose another TransferLeader command. This is for:
    // 1. Considering that the amount of pessimistic locks can be big, it can reduce the
    // unavailable time caused by waiting for the transferee to catch up on logs.
    // 2. Making the leader transfer happen strictly after the write commands that execute
    // before proposing the locks, preventing unexpected lock loss.
fn propose_locks_before_transfer_leader(&mut self) -> bool {
// 1. Disable in-memory pessimistic locks.
let mut pessimistic_locks = self.fsm.peer.txn_ext.pessimistic_locks.write();
// If `is_valid` is false, the locks should have been proposed. But we still need to
// return true to propose another TransferLeader command. Otherwise, some write requests
// that have marked some locks as deleted will fail because raft rejects more proposals.
if !pessimistic_locks.is_valid {
return true;
}
pessimistic_locks.is_valid = false;
// 2. Propose pessimistic locks
if pessimistic_locks.is_empty() {
return false;
}
// FIXME: Raft command has size limit. Either limit the total size of pessimistic locks
// in a region, or split commands here.
let mut cmd = RaftCmdRequest::default();
{
            // Downgrade to a read guard so as not to block readers in the scheduler longer than necessary.
let pessimistic_locks = RwLockWriteGuard::downgrade(pessimistic_locks);
fail_point!("invalidate_locks_before_transfer_leader");
for (key, (lock, deleted)) in &*pessimistic_locks {
if *deleted {
continue;
}
let mut put = PutRequest::default();
put.set_cf(CF_LOCK.to_string());
put.set_key(key.as_encoded().to_owned());
put.set_value(lock.to_lock().to_bytes());
let mut req = Request::default();
req.set_cmd_type(CmdType::Put);
req.set_put(put);
cmd.mut_requests().push(req);
}
}
if cmd.get_requests().is_empty() {
// If the map is not empty but all locks are deleted, it is possible that a write
// command has just marked locks deleted but not proposed yet. It might cause
// that command to fail if we skip proposing the extra TransferLeader command here.
return true;
}
cmd.mut_header().set_region_id(self.fsm.region_id());
cmd.mut_header()
.set_region_epoch(self.region().get_region_epoch().clone());
cmd.mut_header().set_peer(self.fsm.peer.peer.clone());
self.propose_raft_command(cmd, Callback::None, DiskFullOpt::AllowedOnAlmostFull);
true
}
fn handle_destroy_peer(&mut self, job: DestroyPeerJob) -> bool {
        // The initialized flag implicitly indicates whether the apply fsm exists or not.
if job.initialized {
            // Destroy the apply fsm first and wait for the reply msg from it.
self.ctx
.apply_router
.schedule_task(job.region_id, ApplyTask::destroy(job.region_id, false));
false
} else {
// Destroy the peer fsm directly
self.destroy_peer(false)
}
}
/// Check if destroy can be executed immediately. If it can't, the reason is returned.
fn maybe_delay_destroy(&mut self) -> Option<DelayReason> {
if self.fsm.peer.has_unpersisted_ready() {
assert!(self.ctx.sync_write_worker.is_none());
            // The destroy must be delayed if there are some unpersisted readies.
            // Otherwise there is a race between writing the kv db and raft db here
            // and in the write worker.
return Some(DelayReason::UnPersistedReady);
}
if !self.fsm.logs_gc_flushed {
let start_index = self.fsm.peer.last_compacted_idx;
let mut end_index = start_index;
if end_index == 0 {
                // Technically, all logs between the first index and last index should be
                // accessible before the peer is destroyed.
end_index = self.fsm.peer.get_store().first_index();
self.fsm.peer.last_compacted_idx = end_index;
}
let region_id = self.region_id();
let peer_id = self.fsm.peer.peer_id();
let mb = match self.ctx.router.mailbox(region_id) {
Some(mb) => mb,
None => {
if tikv_util::thread_group::is_shutdown(!cfg!(test)) {
// It's shutting down, nothing we can do.
return Some(DelayReason::Shutdown);
}
panic!("{} failed to get mailbox", self.fsm.peer.tag);
}
};
let task = RaftlogGcTask::gc(
self.fsm.peer.get_store().get_region_id(),
start_index,
end_index,
)
.flush()
.when_done(move || {
if let Err(e) =
mb.force_send(PeerMsg::SignificantMsg(SignificantMsg::RaftLogGcFlushed))
{
if tikv_util::thread_group::is_shutdown(!cfg!(test)) {
return;
}
panic!(
"[region {}] {} failed to respond flush message {:?}",
region_id, peer_id, e
);
}
});
if let Err(e) = self.ctx.raftlog_gc_scheduler.schedule(task) {
if tikv_util::thread_group::is_shutdown(!cfg!(test)) {
// It's shutting down, nothing we can do.
return Some(DelayReason::Shutdown);
}
panic!(
"{} failed to schedule raft log task {:?}",
self.fsm.peer.tag, e
);
}
            // We need to delete all log entries to avoid introducing a race between
            // new peers and old peers. Flushing gc logs allows last_compact_index to be
            // used directly without seeking.
return Some(DelayReason::UnFlushLogGc);
}
None
}
fn on_raft_log_gc_flushed(&mut self) {
self.fsm.logs_gc_flushed = true;
let delay = match self.fsm.delayed_destroy {
Some(delay) => delay,
None => panic!("{} a delayed destroy should not recover", self.fsm.peer.tag),
};
self.destroy_peer(delay.merged_by_target);
}
// [PerformanceCriticalPath] TODO: spin off the I/O code (self.fsm.peer.destroy)
fn destroy_peer(&mut self, merged_by_target: bool) -> bool {
fail_point!("destroy_peer");
// Mark itself as pending_remove
self.fsm.peer.pending_remove = true;
fail_point!("destroy_peer_after_pending_move", |_| { true });
if let Some(reason) = self.maybe_delay_destroy() {
if self
.fsm
.delayed_destroy
.map_or(false, |delay| delay.reason == reason)
{
panic!(
"{} destroy peer twice with same delay reason, original {:?}, now {}",
self.fsm.peer.tag, self.fsm.delayed_destroy, merged_by_target
);
}
self.fsm.delayed_destroy = Some(DelayDestroy {
merged_by_target,
reason,
});
        // TODO: The destroy process could also be made asynchronous like the snapshot
        // process; if so, all db write operations would be removed from the store thread.
info!(
"delays destroy";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"merged_by_target" => merged_by_target,
"reason" => ?reason,
);
return false;
}
info!(
"starts destroy";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"merged_by_target" => merged_by_target,
);
let region_id = self.region_id();
// We can't destroy a peer which is handling snapshot.
assert!(!self.fsm.peer.is_handling_snapshot());
// No need to wait for the apply anymore.
if self.fsm.unsafe_recovery_target_commit_index.is_some() {
self.finish_unsafe_recovery_wait_apply();
}
let mut meta = self.ctx.store_meta.lock().unwrap();
if meta.atomic_snap_regions.contains_key(&self.region_id()) {
drop(meta);
panic!(
"{} is applying atomic snapshot during destroying",
self.fsm.peer.tag
);
}
// It's possible that this region gets a snapshot then gets a stale peer msg.
// So the data in `pending_snapshot_regions` should be removed here.
meta.pending_snapshot_regions
.retain(|r| self.fsm.region_id() != r.get_id());
        // Remove `read_progress` and reset the `safe_ts` to zero to reject
        // incoming stale read requests.
meta.region_read_progress.remove(®ion_id);
self.fsm.peer.read_progress.pause();
// Destroy read delegates.
meta.readers.remove(®ion_id);
// Trigger region change observer
self.ctx.coprocessor_host.on_region_changed(
self.fsm.peer.region(),
RegionChangeEvent::Destroy,
self.fsm.peer.get_role(),
);
let task = PdTask::DestroyPeer { region_id };
if let Err(e) = self.ctx.pd_scheduler.schedule(task) {
error!(
"failed to notify pd";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"err" => %e,
);
}
let is_initialized = self.fsm.peer.is_initialized();
if let Err(e) = self.fsm.peer.destroy(
&self.ctx.engines,
&mut self.ctx.perf_context,
merged_by_target,
) {
            // If we don't panic here, the peer will be recreated on the next restart
            // and gc'd again. But if some overlapping region is created before
            // restarting, the gc action will delete the overlapping region's
            // data too.
panic!("{} destroy err {:?}", self.fsm.peer.tag, e);
}
        // Some places use `force_send().unwrap()` while the StoreMeta lock is held.
        // So here, it's necessary to hold the StoreMeta lock when closing the router.
self.ctx.router.close(region_id);
self.fsm.stop();
if is_initialized
&& !merged_by_target
&& meta
.region_ranges
.remove(&enc_end_key(self.fsm.peer.region()))
.is_none()
{
panic!("{} meta corruption detected", self.fsm.peer.tag);
}
if meta.regions.remove(®ion_id).is_none() && !merged_by_target {
panic!("{} meta corruption detected", self.fsm.peer.tag)
}
if self.fsm.peer.local_first_replicate {
let mut pending_create_peers = self.ctx.pending_create_peers.lock().unwrap();
if is_initialized {
assert!(pending_create_peers.get(®ion_id).is_none());
} else {
// If this region's data in `pending_create_peers` is not equal to `(peer_id, false)`,
// it means this peer will be replaced by the split one.
if let Some(status) = pending_create_peers.get(®ion_id) {
if *status == (self.fsm.peer_id(), false) {
pending_create_peers.remove(®ion_id);
}
}
}
}
// Clear merge related structures.
if let Some(&need_atomic) = meta.destroyed_region_for_snap.get(®ion_id) {
if need_atomic {
panic!(
"{} should destroy with target region atomically",
self.fsm.peer.tag
);
} else {
let target_region_id = *meta.targets_map.get(®ion_id).unwrap();
let is_ready = meta
.atomic_snap_regions
.get_mut(&target_region_id)
.unwrap()
.get_mut(®ion_id)
.unwrap();
*is_ready = true;
}
}
meta.pending_merge_targets.remove(®ion_id);
if let Some(target) = meta.targets_map.remove(®ion_id) {
if meta.pending_merge_targets.contains_key(&target) {
meta.pending_merge_targets
.get_mut(&target)
.unwrap()
.remove(®ion_id);
                // When the target doesn't exist (a peer was added but the store is isolated), the source peer decides to destroy itself.
                // Without the target, its `pending_merge_targets` entry won't be removed, so here the source peer helps the target clean up.
if meta.regions.get(&target).is_none()
&& meta.pending_merge_targets.get(&target).unwrap().is_empty()
{
meta.pending_merge_targets.remove(&target);
}
}
}
true
}
    // Updates the region info in store meta and refreshes this peer's peer cache.
fn update_region(&mut self, mut region: metapb::Region) {
{
let mut meta = self.ctx.store_meta.lock().unwrap();
meta.set_region(
&self.ctx.coprocessor_host,
region.clone(),
&mut self.fsm.peer,
);
}
for peer in region.take_peers().into_iter() {
if self.fsm.peer.peer_id() == peer.get_id() {
self.fsm.peer.peer = peer.clone();
}
self.fsm.peer.insert_peer_cache(peer);
}
}
fn on_ready_change_peer(&mut self, cp: ChangePeer) {
if cp.index == raft::INVALID_INDEX {
// Apply failed, skip.
return;
}
self.fsm.peer.mut_store().cancel_generating_snap(None);
if cp.index >= self.fsm.peer.raft_group.raft.raft_log.first_index() {
match self.fsm.peer.raft_group.apply_conf_change(&cp.conf_change) {
Ok(_) => {}
// PD could dispatch redundant conf changes.
Err(raft::Error::NotExists { .. }) | Err(raft::Error::Exists { .. }) => {}
_ => unreachable!(),
}
} else {
// Please take a look at test case test_redundant_conf_change_by_snapshot.
}
self.update_region(cp.region);
fail_point!("change_peer_after_update_region");
let now = Instant::now();
let (mut remove_self, mut need_ping) = (false, false);
for mut change in cp.changes {
let (change_type, peer) = (change.get_change_type(), change.take_peer());
let (store_id, peer_id) = (peer.get_store_id(), peer.get_id());
match change_type {
ConfChangeType::AddNode | ConfChangeType::AddLearnerNode => {
let group_id = self
.ctx
.global_replication_state
.lock()
.unwrap()
.group
.group_id(self.fsm.peer.replication_mode_version, store_id);
if group_id.unwrap_or(0) != 0 {
info!("updating group"; "peer_id" => peer_id, "group_id" => group_id.unwrap());
self.fsm
.peer
.raft_group
.raft
.assign_commit_groups(&[(peer_id, group_id.unwrap())]);
}
// Add this peer to peer_heartbeats.
self.fsm.peer.peer_heartbeats.insert(peer_id, now);
if self.fsm.peer.is_leader() {
need_ping = true;
self.fsm.peer.peers_start_pending_time.push((peer_id, now));
// As `raft_max_inflight_msgs` may have been updated via online config
self.fsm
.peer
.raft_group
.raft
.adjust_max_inflight_msgs(peer_id, self.ctx.cfg.raft_max_inflight_msgs);
}
}
ConfChangeType::RemoveNode => {
// Remove this peer from cache.
self.fsm.peer.peer_heartbeats.remove(&peer_id);
if self.fsm.peer.is_leader() {
self.fsm
.peer
.peers_start_pending_time
.retain(|&(p, _)| p != peer_id);
}
self.fsm.peer.remove_peer_from_cache(peer_id);
                    // We only care about removing ourselves now.
if self.store_id() == store_id {
if self.fsm.peer.peer_id() == peer_id {
remove_self = true;
} else {
panic!(
"{} trying to remove unknown peer {:?}",
self.fsm.peer.tag, peer
);
}
}
}
}
}
        // In the pattern matching above, if the peer is the leader,
        // it pushes the changed peer into `peers_start_pending_time`
        // without checking for duplicates. We move `heartbeat_pd` here
        // to utilize `collect_pending_peers` in `heartbeat_pd` to avoid
        // adding redundant peers.
if self.fsm.peer.is_leader() {
// Notify pd immediately.
info!(
"notify pd with change peer region";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"region" => ?self.fsm.peer.region(),
);
self.fsm.peer.heartbeat_pd(self.ctx);
if !self.fsm.peer.disk_full_peers.is_empty() {
self.fsm.peer.refill_disk_full_peers(self.ctx);
debug!(
"conf change refills disk full peers to {:?}",
self.fsm.peer.disk_full_peers;
"region_id" => self.fsm.region_id(),
);
}
            // Removing or demoting the leader will make this raft group unavailable
            // until a new leader is elected, but we can't revert this operation
            // because its result has already been persisted in the apply worker.
// TODO: should we transfer leader here?
let demote_self = is_learner(&self.fsm.peer.peer);
if remove_self || demote_self {
warn!(
"Removing or demoting leader";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"remove" => remove_self,
"demote" => demote_self,
);
                // If demote_self is true, becoming a follower is the obvious choice.
                // If remove_self is true, we also choose to become a follower for the
                // following reasons.
                // Some functions in raft-rs use `unwrap` to get the peer's own
                // progress, and they will panic when called in that case.
                // Before async io was introduced, this peer would be destroyed immediately,
                // so there was no chance to call these functions.
                // But that may no longer hold due to delayed destroy.
                // Most of these functions are only called when the peer is a leader
                // (which is pretty reasonable because progress is used to track others' status).
                // The only exception is `Raft::restore` at the time of writing, which is ok
                // because raft msgs (including snapshots) aren't handled when `pending_remove`
                // is true (it is set in `destroy_peer`).
// TODO: totally avoid calling these raft-rs functions when `pending_remove` is true.
self.fsm
.peer
.raft_group
.raft
.become_follower(self.fsm.peer.term(), raft::INVALID_ID);
                // Don't ping, to speed up leader election.
need_ping = false;
}
} else if !self.fsm.peer.has_valid_leader() {
self.fsm.reset_hibernate_state(GroupState::Chaos);
self.register_raft_base_tick();
}
if need_ping {
// Speed up snapshot instead of waiting another heartbeat.
self.fsm.peer.ping();
self.fsm.has_ready = true;
}
if remove_self {
self.destroy_peer(false);
}
}
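    /// Handles an applied CompactLog command: shrinks `raft_log_size_hint` in
    /// proportion to the entries remaining after truncation and schedules raft
    /// log GC up to the new truncated index.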
fn on_ready_compact_log(&mut self, first_index: u64, state: RaftTruncatedState) {
let total_cnt = self.fsm.peer.last_applying_idx - first_index;
        // The size of the current CompactLog command can be ignored.
let remain_cnt = self.fsm.peer.last_applying_idx - state.get_index() - 1;
self.fsm.peer.raft_log_size_hint =
self.fsm.peer.raft_log_size_hint * remain_cnt / total_cnt;
let compact_to = state.get_index() + 1;
self.fsm.peer.schedule_raftlog_gc(self.ctx, compact_to);
self.fsm.peer.last_compacted_idx = compact_to;
self.fsm.peer.mut_store().compact_to(compact_to);
}
fn on_ready_split_region(
&mut self,
derived: metapb::Region,
regions: Vec<metapb::Region>,
new_split_regions: HashMap<u64, apply::NewSplitPeer>,
) {
fail_point!("on_split", self.ctx.store_id() == 3, |_| {});
let region_id = derived.get_id();
// Group in-memory pessimistic locks in the original region into new regions. The locks of
// new regions will be put into the corresponding new regions later. And the locks belonging
// to the old region will stay in the original map.
let region_locks = {
let mut pessimistic_locks = self.fsm.peer.txn_ext.pessimistic_locks.write();
// Update the version so the concurrent reader will fail due to EpochNotMatch
// instead of PessimisticLockNotFound.
pessimistic_locks.version = derived.get_region_epoch().get_version();
pessimistic_locks.group_by_regions(®ions, &derived)
};
fail_point!("on_split_invalidate_locks");
// Roughly estimate the size and keys for new regions.
let new_region_count = regions.len() as u64;
let estimated_size = self.fsm.peer.approximate_size.map(|v| v / new_region_count);
let estimated_keys = self.fsm.peer.approximate_keys.map(|v| v / new_region_count);
let mut meta = self.ctx.store_meta.lock().unwrap();
meta.set_region(&self.ctx.coprocessor_host, derived, &mut self.fsm.peer);
self.fsm.peer.post_split();
// It's not correct anymore, so set it to false to schedule a split check task.
self.fsm.peer.has_calculated_region_size = false;
let is_leader = self.fsm.peer.is_leader();
if is_leader {
self.fsm.peer.approximate_size = estimated_size;
self.fsm.peer.approximate_keys = estimated_keys;
self.fsm.peer.heartbeat_pd(self.ctx);
// Notify pd immediately to let it update the region meta.
info!(
"notify pd with split";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"split_count" => regions.len(),
);
            // Now pd only uses ReportBatchSplit for showing history operations,
            // so we send it independently here.
let task = PdTask::ReportBatchSplit {
regions: regions.to_vec(),
};
if let Err(e) = self.ctx.pd_scheduler.schedule(task) {
error!(
"failed to notify pd";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"err" => %e,
);
}
}
let last_key = enc_end_key(regions.last().unwrap());
if meta.region_ranges.remove(&last_key).is_none() {
panic!("{} original region should exist", self.fsm.peer.tag);
}
let last_region_id = regions.last().unwrap().get_id();
for (new_region, locks) in regions.into_iter().zip(region_locks) {
let new_region_id = new_region.get_id();
if new_region_id == region_id {
let not_exist = meta
.region_ranges
.insert(enc_end_key(&new_region), new_region_id)
.is_none();
assert!(not_exist, "[region {}] should not exist", new_region_id);
continue;
}
            // Check if this new region should be split
let new_split_peer = new_split_regions.get(&new_region.get_id()).unwrap();
if new_split_peer.result.is_some() {
if let Err(e) = self
.fsm
.peer
.mut_store()
.clear_extra_split_data(enc_start_key(&new_region), enc_end_key(&new_region))
{
error!(?e;
"failed to cleanup extra split data, may leave some dirty data";
"region_id" => new_region.get_id(),
);
}
continue;
}
// Now all checking passed.
{
let mut pending_create_peers = self.ctx.pending_create_peers.lock().unwrap();
assert_eq!(
pending_create_peers.remove(&new_region_id),
Some((new_split_peer.peer_id, true))
);
}
            // Insert the new region and validate it.
info!(
"insert new region";
"region_id" => new_region_id,
"region" => ?new_region,
);
if let Some(r) = meta.regions.get(&new_region_id) {
// Suppose a new node is added by conf change and the snapshot comes slowly.
// Then, the region splits and the first vote message comes to the new node
// before the old snapshot, which will create an uninitialized peer on the
// store. After that, the old snapshot comes, followed with the last split
// proposal. After it's applied, the uninitialized peer will be met.
// We can remove this uninitialized peer directly.
if util::is_region_initialized(r) {
panic!(
"[region {}] duplicated region {:?} for split region {:?}",
new_region_id, r, new_region
);
}
self.ctx.router.close(new_region_id);
}
let (sender, mut new_peer) = match PeerFsm::create(
self.ctx.store_id(),
&self.ctx.cfg,
self.ctx.region_scheduler.clone(),
self.ctx.raftlog_fetch_scheduler.clone(),
self.ctx.engines.clone(),
&new_region,
) {
Ok((sender, new_peer)) => (sender, new_peer),
Err(e) => {
                    // Peer information has already been written into the db and can't be
                    // recovered. There is probably a bug.
panic!("create new split region {:?} err {:?}", new_region, e);
}
};
let mut replication_state = self.ctx.global_replication_state.lock().unwrap();
new_peer.peer.init_replication_mode(&mut *replication_state);
drop(replication_state);
let meta_peer = new_peer.peer.peer.clone();
for p in new_region.get_peers() {
// Add this peer to cache.
new_peer.peer.insert_peer_cache(p.clone());
}
            // The new peer derives its write flow from the parent region;
            // this will be used to balance write flow.
new_peer.peer.peer_stat = self.fsm.peer.peer_stat.clone();
new_peer.peer.last_compacted_idx =
new_peer.apply_state().get_truncated_state().get_index() + 1;
let campaigned = new_peer.peer.maybe_campaign(is_leader);
new_peer.has_ready |= campaigned;
if is_leader {
new_peer.peer.approximate_size = estimated_size;
new_peer.peer.approximate_keys = estimated_keys;
*new_peer.peer.txn_ext.pessimistic_locks.write() = locks;
                // The new peer is likely to become leader; send a heartbeat immediately to
                // reduce client query misses.
new_peer.peer.heartbeat_pd(self.ctx);
}
new_peer.peer.activate(self.ctx);
meta.regions.insert(new_region_id, new_region.clone());
let not_exist = meta
.region_ranges
.insert(enc_end_key(&new_region), new_region_id)
.is_none();
assert!(not_exist, "[region {}] should not exist", new_region_id);
meta.readers
.insert(new_region_id, ReadDelegate::from_peer(new_peer.get_peer()));
meta.region_read_progress
.insert(new_region_id, new_peer.peer.read_progress.clone());
if last_region_id == new_region_id {
                // To prevent a big region, the right region needs to run a split
                // check again after the split.
new_peer.peer.size_diff_hint = self.ctx.cfg.region_split_check_diff.0;
}
let mailbox = BasicMailbox::new(sender, new_peer, self.ctx.router.state_cnt().clone());
self.ctx.router.register(new_region_id, mailbox);
self.ctx
.router
.force_send(new_region_id, PeerMsg::Start)
.unwrap();
if !campaigned {
if let Some(msg) = meta
.pending_msgs
.swap_remove_front(|m| m.get_to_peer() == &meta_peer)
{
let peer_msg = PeerMsg::RaftMessage(InspectedRaftMessage { heap_size: 0, msg });
if let Err(e) = self.ctx.router.force_send(new_region_id, peer_msg) {
warn!("handle first requset failed"; "region_id" => region_id, "error" => ?e);
}
}
}
}
drop(meta);
if is_leader {
self.on_split_region_check_tick();
}
fail_point!("after_split", self.ctx.store_id() == 3, |_| {});
}
fn register_merge_check_tick(&mut self) {
self.schedule_tick(PeerTick::CheckMerge)
}
    /// Checks if the merge target region is staler than the local one in the kv engine.
    /// It should be called when the target region is not in the in-memory region map.
    /// If everything is ok, the answer should always be true because PD should ensure all target peers exist.
    /// If not, an error log is printed and false is returned.
fn is_merge_target_region_stale(&self, target_region: &metapb::Region) -> Result<bool> {
let target_region_id = target_region.get_id();
let target_peer_id = util::find_peer(target_region, self.ctx.store_id())
.unwrap()
.get_id();
let state_key = keys::region_state_key(target_region_id);
if let Some(target_state) = self
.ctx
.engines
.kv
.get_msg_cf::<RegionLocalState>(CF_RAFT, &state_key)?
{
if util::is_epoch_stale(
target_region.get_region_epoch(),
target_state.get_region().get_region_epoch(),
) {
return Ok(true);
}
            // The local target region epoch is staler than the target region's.
            // In the case where the peer is destroyed by receiving a gc msg rather than by
            // applying a conf change, the epoch may be staler but that's legal, so check the
            // peer id to confirm.
if let Some(local_target_peer_id) =
util::find_peer(target_state.get_region(), self.ctx.store_id()).map(|r| r.get_id())
{
match local_target_peer_id.cmp(&target_peer_id) {
cmp::Ordering::Equal => {
if target_state.get_state() == PeerState::Tombstone {
// The local target peer has already been destroyed.
return Ok(true);
}
error!(
"the local target peer state is not tombstone in kv engine";
"target_peer_id" => target_peer_id,
"target_peer_state" => ?target_state.get_state(),
"target_region" => ?target_region,
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
}
cmp::Ordering::Greater => {
// The local target peer id is greater than the one in target region, but its epoch
// is staler than target_region's. That is contradictory.
panic!("{} local target peer id {} is greater than the one in target region {}, but its epoch is staler, local target region {:?},
target region {:?}", self.fsm.peer.tag, local_target_peer_id, target_peer_id, target_state.get_region(), target_region);
}
cmp::Ordering::Less => {
error!(
"the local target peer id in kv engine is less than the one in target region";
"local_target_peer_id" => local_target_peer_id,
"target_peer_id" => target_peer_id,
"target_region" => ?target_region,
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
}
}
} else {
                // Can't get the local target peer id, probably because this target peer was
                // removed by applying a conf change.
error!(
"the local target peer does not exist in target region state";
"target_region" => ?target_region,
"local_target" => ?target_state.get_region(),
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
}
} else {
error!(
"failed to load target peer's RegionLocalState from kv engine";
"target_peer_id" => target_peer_id,
"target_region" => ?target_region,
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
}
Ok(false)
}
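    /// Checks whether the target region is ready for a CommitMerge. Returns
    /// Ok(true) when the local target peer has caught up to the expected epoch,
    /// Ok(false) when we should retry later, and an error when the target has
    /// changed or been destroyed.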
fn validate_merge_peer(&self, target_region: &metapb::Region) -> Result<bool> {
let target_region_id = target_region.get_id();
let exist_region = {
let meta = self.ctx.store_meta.lock().unwrap();
meta.regions.get(&target_region_id).cloned()
};
if let Some(r) = exist_region {
let exist_epoch = r.get_region_epoch();
let expect_epoch = target_region.get_region_epoch();
// exist_epoch > expect_epoch
if util::is_epoch_stale(expect_epoch, exist_epoch) {
return Err(box_err!(
"target region changed {:?} -> {:?}",
target_region,
r
));
}
// exist_epoch < expect_epoch
if util::is_epoch_stale(exist_epoch, expect_epoch) {
info!(
"target region still not catch up, skip.";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"target_region" => ?target_region,
"exist_region" => ?r,
);
return Ok(false);
}
return Ok(true);
}
        // All of the target peers must exist before merging, which is guaranteed by PD.
        // Now the target peer is not in the region map.
match self.is_merge_target_region_stale(target_region) {
Err(e) => {
error!(%e;
"failed to load region state, ignore";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"target_region_id" => target_region_id,
);
Ok(false)
}
Ok(true) => Err(box_err!("region {} is destroyed", target_region_id)),
Ok(false) => {
if self.ctx.cfg.dev_assert {
panic!(
"something is wrong, maybe PD do not ensure all target peers exist before merging"
);
}
error!(
"something is wrong, maybe PD do not ensure all target peers exist before merging"
);
Ok(false)
}
}
}
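    /// Proposes a `CommitMerge` admin command to the target peer, carrying the
    /// source region's log entries in `[low, commit]` that the target may still
    /// need to apply. If the target region has not caught up yet, returns
    /// `Ok(())` and waits for the next merge-check round.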
fn schedule_merge(&mut self) -> Result<()> {
fail_point!("on_schedule_merge", |_| Ok(()));
let (request, target_id) = {
let state = self.fsm.peer.pending_merge_state.as_ref().unwrap();
let expect_region = state.get_target();
if !self.validate_merge_peer(expect_region)? {
// Wait till next round.
return Ok(());
}
let target_id = expect_region.get_id();
let sibling_region = expect_region;
let (min_index, _) = self.fsm.peer.get_min_progress()?;
let low = cmp::max(min_index + 1, state.get_min_index());
// TODO: move this into raft module.
            // Use `>` rather than `>=` so that the `PrepareMerge` proposal itself (at `commit`) is included.
let entries = if low > state.get_commit() {
vec![]
} else {
// TODO: fetch entries in async way
match self.fsm.peer.get_store().entries(
low,
state.get_commit() + 1,
NO_LIMIT,
GetEntriesContext::empty(false),
) {
Ok(ents) => ents,
Err(e) => panic!(
"[region {}] {} failed to get merge entires: {:?}, low:{}, commit: {}",
self.fsm.region_id(),
self.fsm.peer_id(),
e,
low,
state.get_commit()
),
}
};
let sibling_peer = util::find_peer(sibling_region, self.store_id()).unwrap();
let mut request = new_admin_request(sibling_region.get_id(), sibling_peer.clone());
request
.mut_header()
.set_region_epoch(sibling_region.get_region_epoch().clone());
let mut admin = AdminRequest::default();
admin.set_cmd_type(AdminCmdType::CommitMerge);
admin
.mut_commit_merge()
.set_source(self.fsm.peer.region().clone());
admin.mut_commit_merge().set_commit(state.get_commit());
admin.mut_commit_merge().set_entries(entries.into());
request.set_admin_request(admin);
(request, target_id)
};
        // Please note that this assumes the unit of network isolation is the store rather than
        // the peer. So a quorum of the source region's stores should also be a quorum of the
        // target region's stores. Otherwise we would need to enable proposal forwarding.
self.ctx
.router
.force_send(
target_id,
PeerMsg::RaftCommand(RaftCommand::new_ext(
request,
Callback::None,
RaftCmdExtraOpts {
deadline: None,
disk_full_opt: DiskFullOpt::AllowedOnAlmostFull,
},
)),
)
.map_err(|_| Error::RegionNotFound(target_id))
}
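    /// Proposes a `RollbackMerge` admin command to cancel this region's
    /// pending merge at its recorded commit index.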
fn rollback_merge(&mut self) {
let req = {
let state = self.fsm.peer.pending_merge_state.as_ref().unwrap();
let mut request =
new_admin_request(self.fsm.peer.region().get_id(), self.fsm.peer.peer.clone());
request
.mut_header()
.set_region_epoch(self.fsm.peer.region().get_region_epoch().clone());
let mut admin = AdminRequest::default();
admin.set_cmd_type(AdminCmdType::RollbackMerge);
admin.mut_rollback_merge().set_commit(state.get_commit());
request.set_admin_request(admin);
request
};
self.propose_raft_command(req, Callback::None, DiskFullOpt::AllowedOnAlmostFull);
}
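    /// Merge-check tick handler: tries to schedule the pending merge and, on
    /// failure, collects rollback votes so the merge can be rolled back once a
    /// quorum of peers agrees.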
fn on_check_merge(&mut self) {
if self.fsm.stopped
|| self.fsm.peer.pending_remove
|| self.fsm.peer.pending_merge_state.is_none()
{
return;
}
self.register_merge_check_tick();
fail_point!(
"on_check_merge_not_1001",
self.fsm.peer_id() != 1001,
|_| {}
);
if let Err(e) = self.schedule_merge() {
if self.fsm.peer.is_leader() {
self.fsm
.peer
.add_want_rollback_merge_peer(self.fsm.peer_id());
if self
.fsm
.peer
.raft_group
.raft
.prs()
.has_quorum(&self.fsm.peer.want_rollback_merge_peers)
{
info!(
"failed to schedule merge, rollback";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"err" => %e,
"error_code" => %e.error_code(),
);
self.rollback_merge();
}
} else if !is_learner(&self.fsm.peer.peer) {
info!(
"want to rollback merge";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"leader_id" => self.fsm.peer.leader_id(),
"err" => %e,
"error_code" => %e.error_code(),
);
if self.fsm.peer.leader_id() != raft::INVALID_ID {
self.fsm.peer.send_want_rollback_merge(
self.fsm
.peer
.pending_merge_state
.as_ref()
.unwrap()
.get_commit(),
self.ctx,
);
}
}
}
}
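    /// Applies the result of a `PrepareMerge` command: updates the region,
    /// records the merge state, and either answers a pending `CatchUpLogs`
    /// from the target or starts the merge check.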
fn on_ready_prepare_merge(&mut self, region: metapb::Region, state: MergeState) {
{
let mut meta = self.ctx.store_meta.lock().unwrap();
meta.set_region(&self.ctx.coprocessor_host, region, &mut self.fsm.peer);
}
self.fsm.peer.pending_merge_state = Some(state);
let state = self.fsm.peer.pending_merge_state.as_ref().unwrap();
if let Some(ref catch_up_logs) = self.fsm.peer.catch_up_logs {
if state.get_commit() == catch_up_logs.merge.get_commit() {
assert_eq!(state.get_target().get_id(), catch_up_logs.target_region_id);
// Indicate that `on_catch_up_logs_for_merge` has already executed.
// Mark pending_remove because its apply fsm will be destroyed.
self.fsm.peer.pending_remove = true;
// Send CatchUpLogs back to destroy source apply fsm,
// then it will send `Noop` to trigger target apply fsm.
self.ctx.apply_router.schedule_task(
self.fsm.region_id(),
ApplyTask::LogsUpToDate(self.fsm.peer.catch_up_logs.take().unwrap()),
);
return;
}
}
self.on_check_merge();
}
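    /// Handles the target peer's `CatchUpLogs` request on the source peer: if
    /// the source already has all the merged logs it schedules the apply-fsm
    /// handshake, otherwise it appends and commits the missing entries first.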
fn on_catch_up_logs_for_merge(&mut self, mut catch_up_logs: CatchUpLogs) {
let region_id = self.fsm.region_id();
assert_eq!(region_id, catch_up_logs.merge.get_source().get_id());
if let Some(ref cul) = self.fsm.peer.catch_up_logs {
panic!(
"{} get catch_up_logs from {} but has already got from {}",
self.fsm.peer.tag, catch_up_logs.target_region_id, cul.target_region_id
)
}
if let Some(ref pending_merge_state) = self.fsm.peer.pending_merge_state {
if pending_merge_state.get_commit() == catch_up_logs.merge.get_commit() {
assert_eq!(
pending_merge_state.get_target().get_id(),
catch_up_logs.target_region_id
);
// Indicate that `on_ready_prepare_merge` has already executed.
// Mark pending_remove because its apply fsm will be destroyed.
self.fsm.peer.pending_remove = true;
// Just for saving memory.
catch_up_logs.merge.clear_entries();
// Send CatchUpLogs back to destroy source apply fsm,
// then it will send `Noop` to trigger target apply fsm.
self.ctx
.apply_router
.schedule_task(region_id, ApplyTask::LogsUpToDate(catch_up_logs));
return;
}
}
// Directly append these logs to raft log and then commit them.
match self
.fsm
.peer
.maybe_append_merge_entries(&catch_up_logs.merge)
{
Some(last_index) => {
info!(
"append and commit entries to source region";
"region_id" => region_id,
"peer_id" => self.fsm.peer.peer_id(),
"last_index" => last_index,
);
// Now it has some committed entries, so mark it to take `Ready` in next round.
self.fsm.has_ready = true;
}
None => {
info!(
"no need to catch up logs";
"region_id" => region_id,
"peer_id" => self.fsm.peer.peer_id(),
);
}
}
// Just for saving memory.
catch_up_logs.merge.clear_entries();
self.fsm.peer.catch_up_logs = Some(catch_up_logs);
}
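    /// Applies the result of a `CommitMerge` command on the target peer:
    /// updates store metadata and read state, and notifies the source peer to
    /// destroy itself with `MergeResultKind::FromTargetLog`.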
fn on_ready_commit_merge(
&mut self,
merge_index: u64,
region: metapb::Region,
source: metapb::Region,
) {
self.register_split_region_check_tick();
let mut meta = self.ctx.store_meta.lock().unwrap();
let prev = meta.region_ranges.remove(&enc_end_key(&source));
assert_eq!(prev, Some(source.get_id()));
let prev = if region.get_end_key() == source.get_end_key() {
meta.region_ranges.remove(&enc_start_key(&source))
} else {
meta.region_ranges.remove(&enc_end_key(®ion))
};
if prev != Some(region.get_id()) {
panic!(
"{} meta corrupted: prev: {:?}, ranges: {:?}",
self.fsm.peer.tag, prev, meta.region_ranges
);
}
meta.region_ranges
.insert(enc_end_key(®ion), region.get_id());
assert!(meta.regions.remove(&source.get_id()).is_some());
meta.set_region(&self.ctx.coprocessor_host, region, &mut self.fsm.peer);
if let Some(d) = meta.readers.get_mut(&source.get_id()) {
d.mark_pending_remove();
}
        // After the merge is committed, the region's key range is extended and its `safe_ts`
        // should be reset to `min(source_safe_ts, target_safe_ts)`.
let source_read_progress = meta.region_read_progress.remove(&source.get_id()).unwrap();
self.fsm
.peer
.read_progress
.merge_safe_ts(source_read_progress.safe_ts(), merge_index);
// If a follower merges into a leader, a more recent read may happen
// on the leader of the follower. So max ts should be updated after
// a region merge.
self.fsm
.peer
.require_updating_max_ts(&self.ctx.pd_scheduler);
drop(meta);
        // Make sure the approximate size and keys are updated in time.
        // The reason followers need to update as well is that after a merge followed by a
        // leader transfer, the new leader may otherwise have stale size and keys.
self.fsm.peer.size_diff_hint = self.ctx.cfg.region_split_check_diff.0;
if self.fsm.peer.is_leader() {
info!(
"notify pd with merge";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"source_region" => ?source,
"target_region" => ?self.fsm.peer.region(),
);
self.fsm.peer.heartbeat_pd(self.ctx);
}
if let Err(e) = self.ctx.router.force_send(
source.get_id(),
PeerMsg::SignificantMsg(SignificantMsg::MergeResult {
target_region_id: self.fsm.region_id(),
target: self.fsm.peer.peer.clone(),
result: MergeResultKind::FromTargetLog,
}),
) {
panic!(
"{} failed to send merge result(FromTargetLog) to source region {}, err {}",
self.fsm.peer.tag,
source.get_id(),
e
);
}
}
/// Handle rollbacking Merge result.
///
/// If commit is 0, it means that Merge is rollbacked by a snapshot; otherwise
/// it's rollbacked by a proposal, and its value should be equal to the commit
/// index of previous PrepareMerge.
fn on_ready_rollback_merge(&mut self, commit: u64, region: Option<metapb::Region>) {
let pending_commit = self
.fsm
.peer
.pending_merge_state
.as_ref()
.unwrap()
.get_commit();
if commit != 0 && pending_commit != commit {
panic!(
"{} rollbacks a wrong merge: {} != {}",
self.fsm.peer.tag, pending_commit, commit
);
}
        // Clear merge related data
self.fsm.peer.pending_merge_state = None;
self.fsm.peer.want_rollback_merge_peers.clear();
// Resume updating `safe_ts`
self.fsm.peer.read_progress.resume();
if let Some(r) = region {
let mut meta = self.ctx.store_meta.lock().unwrap();
meta.set_region(&self.ctx.coprocessor_host, r, &mut self.fsm.peer);
}
if self.fsm.peer.is_leader() {
info!(
"notify pd with rollback merge";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"commit_index" => commit,
);
self.fsm.peer.txn_ext.pessimistic_locks.write().is_valid = true;
self.fsm.peer.heartbeat_pd(self.ctx);
}
}
fn on_merge_result(
&mut self,
target_region_id: u64,
target: metapb::Peer,
result: MergeResultKind,
) {
let exists = self
.fsm
.peer
.pending_merge_state
.as_ref()
.map_or(true, |s| {
s.get_target().get_peers().iter().any(|p| {
p.get_store_id() == target.get_store_id() && p.get_id() <= target.get_id()
})
});
if !exists {
panic!(
"{} unexpected merge result: {:?} {:?} {:?}",
self.fsm.peer.tag, self.fsm.peer.pending_merge_state, target, result
);
}
        // Because of the check before proposing `PrepareMerge`, there is no `CompactLog`
        // proposal between the smallest commit index and the latest index.
        // So if the merge succeeds, no source peer can be in the applying-snapshot state,
        // and all of them must be initialized.
{
let meta = self.ctx.store_meta.lock().unwrap();
if meta.atomic_snap_regions.contains_key(&self.region_id()) {
panic!(
"{} is applying atomic snapshot on getting merge result, target region id {}, target peer {:?}, merge result type {:?}",
self.fsm.peer.tag, target_region_id, target, result
);
}
}
if self.fsm.peer.is_handling_snapshot() {
panic!(
"{} is applying snapshot on getting merge result, target region id {}, target peer {:?}, merge result type {:?}",
self.fsm.peer.tag, target_region_id, target, result
);
}
if !self.fsm.peer.is_initialized() {
panic!(
"{} is not initialized on getting merge result, target region id {}, target peer {:?}, merge result type {:?}",
self.fsm.peer.tag, target_region_id, target, result
);
}
match result {
MergeResultKind::FromTargetLog => {
info!(
"merge finished";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"target_region" => ?self.fsm.peer.pending_merge_state.as_ref().unwrap().target,
);
self.destroy_peer(true);
}
MergeResultKind::FromTargetSnapshotStep1 => {
info!(
"merge finished with target snapshot";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"target_region_id" => target_region_id,
);
self.fsm.peer.pending_remove = true;
// Destroy apply fsm at first
self.ctx.apply_router.schedule_task(
self.fsm.region_id(),
ApplyTask::destroy(self.fsm.region_id(), true),
);
}
MergeResultKind::FromTargetSnapshotStep2 => {
                // `merged_by_target` is true because this region's range already belongs to
                // its target region, so we must not clear data; otherwise its target region's
                // data will be corrupted.
self.destroy_peer(true);
}
MergeResultKind::Stale => {
self.on_stale_merge(target_region_id);
}
};
}
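    /// Garbage-collects this source peer when its merge can no longer be
    /// continued, e.g. because the target peer has already moved on.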
fn on_stale_merge(&mut self, target_region_id: u64) {
if self.fsm.peer.pending_remove {
return;
}
info!(
"successful merge can't be continued, try to gc stale peer";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"target_region_id" => target_region_id,
"merge_state" => ?self.fsm.peer.pending_merge_state,
);
        // Because of the check before proposing `PrepareMerge`, there is no `CompactLog`
        // proposal between the smallest commit index and the latest index.
        // So if the merge succeeds, no source peer can be in the applying-snapshot state,
        // and all of them must be initialized.
// So `maybe_destroy` must succeed here.
let job = self.fsm.peer.maybe_destroy(self.ctx).unwrap();
self.handle_destroy_peer(job);
}
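    /// Finishes up after a snapshot has been persisted: refreshes commit
    /// groups and store metadata, destroys source regions overlapped by the
    /// snapshot, and updates the read delegate and leader info.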
fn on_ready_persist_snapshot(&mut self, persist_res: PersistSnapshotResult) {
let prev_region = persist_res.prev_region;
let region = persist_res.region;
info!(
"snapshot is persisted";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"region" => ?region,
);
let mut state = self.ctx.global_replication_state.lock().unwrap();
let gb = state
.calculate_commit_group(self.fsm.peer.replication_mode_version, region.get_peers());
self.fsm.peer.raft_group.raft.clear_commit_group();
self.fsm.peer.raft_group.raft.assign_commit_groups(gb);
fail_point!("after_assign_commit_groups_on_apply_snapshot");
        // Drop it before accessing `store_meta`.
drop(state);
let mut meta = self.ctx.store_meta.lock().unwrap();
debug!(
"check snapshot range";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"prev_region" => ?prev_region,
);
meta.readers.insert(
self.fsm.region_id(),
ReadDelegate::from_peer(&self.fsm.peer),
);
        // Remove this region's snapshot region from `pending_snapshot_regions`.
        // `pending_snapshot_regions` is only used to occupy the key range, so once this
        // peer is added to `region_ranges`, it can be removed from `pending_snapshot_regions`.
meta.pending_snapshot_regions
.retain(|r| self.fsm.region_id() != r.get_id());
// Remove its source peers' metadata
for r in &persist_res.destroy_regions {
let prev = meta.region_ranges.remove(&enc_end_key(r));
assert_eq!(prev, Some(r.get_id()));
assert!(meta.regions.remove(&r.get_id()).is_some());
if let Some(d) = meta.readers.get_mut(&r.get_id()) {
d.mark_pending_remove();
}
}
// Remove the data from `atomic_snap_regions` and `destroyed_region_for_snap`
// which are added before applying snapshot
if let Some(wait_destroy_regions) = meta.atomic_snap_regions.remove(&self.fsm.region_id()) {
for (source_region_id, _) in wait_destroy_regions {
                assert!(
                    meta.destroyed_region_for_snap
                        .remove(&source_region_id)
                        .is_some()
                );
}
}
if util::is_region_initialized(&prev_region) {
info!(
"region changed after persisting snapshot";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"prev_region" => ?prev_region,
"region" => ?region,
);
let prev = meta.region_ranges.remove(&enc_end_key(&prev_region));
if prev != Some(region.get_id()) {
panic!(
"{} meta corrupted, expect {:?} got {:?}",
self.fsm.peer.tag, prev_region, prev,
);
}
} else if self.fsm.peer.local_first_replicate {
            // This peer was uninitialized previously.
            // More accurately, the `RegionLocalState` has now been persisted, so the data can be removed from `pending_create_peers`.
let mut pending_create_peers = self.ctx.pending_create_peers.lock().unwrap();
assert_eq!(
pending_create_peers.remove(&self.fsm.region_id()),
Some((self.fsm.peer_id(), false))
);
}
if let Some(r) = meta
.region_ranges
.insert(enc_end_key(®ion), region.get_id())
{
panic!("{} unexpected region {:?}", self.fsm.peer.tag, r);
}
let prev = meta.regions.insert(region.get_id(), region.clone());
assert_eq!(prev, Some(prev_region));
drop(meta);
self.fsm.peer.read_progress.update_leader_info(
self.fsm.peer.leader_id(),
self.fsm.peer.term(),
®ion,
);
for r in &persist_res.destroy_regions {
if let Err(e) = self.ctx.router.force_send(
r.get_id(),
PeerMsg::SignificantMsg(SignificantMsg::MergeResult {
target_region_id: self.fsm.region_id(),
target: self.fsm.peer.peer.clone(),
result: MergeResultKind::FromTargetSnapshotStep2,
}),
) {
panic!(
"{} failed to send merge result(FromTargetSnapshotStep2) to source region {}, err {}",
self.fsm.peer.tag,
r.get_id(),
e
);
}
}
}
fn on_ready_result(
&mut self,
exec_results: &mut VecDeque<ExecResult<EK::Snapshot>>,
metrics: &ApplyMetrics,
) {
        // Handle the results of executing committed log entries.
while let Some(result) = exec_results.pop_front() {
match result {
ExecResult::ChangePeer(cp) => self.on_ready_change_peer(cp),
ExecResult::CompactLog { first_index, state } => {
self.on_ready_compact_log(first_index, state)
}
ExecResult::SplitRegion {
derived,
regions,
new_split_regions,
} => self.on_ready_split_region(derived, regions, new_split_regions),
ExecResult::PrepareMerge { region, state } => {
self.on_ready_prepare_merge(region, state)
}
ExecResult::CommitMerge {
index,
region,
source,
} => self.on_ready_commit_merge(index, region, source),
ExecResult::RollbackMerge { region, commit } => {
self.on_ready_rollback_merge(commit, Some(region))
}
ExecResult::ComputeHash {
region,
index,
context,
snap,
} => self.on_ready_compute_hash(region, index, context, snap),
ExecResult::VerifyHash {
index,
context,
hash,
} => self.on_ready_verify_hash(index, context, hash),
ExecResult::DeleteRange { .. } => {
// TODO: clean user properties?
}
ExecResult::IngestSst { ssts } => self.on_ingest_sst_result(ssts),
ExecResult::TransferLeader { term } => self.on_transfer_leader(term),
}
}
        // Update metrics only when all exec_results are finished, in case the metrics are
        // counted multiple times when waiting for a commit merge.
self.ctx.store_stat.lock_cf_bytes_written += metrics.lock_cf_written_bytes;
self.ctx.store_stat.engine_total_bytes_written += metrics.written_bytes;
self.ctx.store_stat.engine_total_keys_written += metrics.written_keys;
}
    /// Checks whether a request that carries a prepare_merge/commit_merge proposal is valid.
fn check_merge_proposal(&self, msg: &mut RaftCmdRequest) -> Result<()> {
if !msg.get_admin_request().has_prepare_merge()
&& !msg.get_admin_request().has_commit_merge()
{
return Ok(());
}
let region = self.fsm.peer.region();
if msg.get_admin_request().has_prepare_merge() {
// Just for simplicity, do not start region merge while in joint state
if self.fsm.peer.in_joint_state() {
return Err(box_err!(
"{} region in joint state, can not propose merge command, command: {:?}",
self.fsm.peer.tag,
msg.get_admin_request()
));
}
let target_region = msg.get_admin_request().get_prepare_merge().get_target();
{
let meta = self.ctx.store_meta.lock().unwrap();
match meta.regions.get(&target_region.get_id()) {
Some(r) => {
if r != target_region {
return Err(box_err!(
"target region not matched, skip proposing: {:?} != {:?}",
r,
target_region
));
}
}
None => {
return Err(box_err!(
"target region {} doesn't exist.",
target_region.get_id()
));
}
}
}
if !util::is_sibling_regions(target_region, region) {
return Err(box_err!(
"{:?} and {:?} are not sibling, skip proposing.",
target_region,
region
));
}
if !util::region_on_same_stores(target_region, region) {
return Err(box_err!(
"peers doesn't match {:?} != {:?}, reject merge",
region.get_peers(),
target_region.get_peers()
));
}
} else {
let source_region = msg.get_admin_request().get_commit_merge().get_source();
if !util::is_sibling_regions(source_region, region) {
return Err(box_err!(
"{:?} and {:?} should be sibling",
source_region,
region
));
}
if !util::region_on_same_stores(source_region, region) {
return Err(box_err!(
"peers not matched: {:?} {:?}",
source_region,
region
));
}
}
Ok(())
}
fn pre_propose_raft_command(
&mut self,
msg: &RaftCmdRequest,
) -> Result<Option<RaftCmdResponse>> {
// Check store_id, make sure that the msg is dispatched to the right place.
if let Err(e) = util::check_store_id(msg, self.store_id()) {
self.ctx.raft_metrics.invalid_proposal.mismatch_store_id += 1;
return Err(e);
}
if msg.has_status_request() {
// For status commands, we handle it here directly.
let resp = self.execute_status_command(msg)?;
return Ok(Some(resp));
}
// Check whether the store has the right peer to handle the request.
let region_id = self.region_id();
let leader_id = self.fsm.peer.leader_id();
let request = msg.get_requests();
// ReadIndex can be processed on the replicas.
let is_read_index_request =
request.len() == 1 && request[0].get_cmd_type() == CmdType::ReadIndex;
let mut read_only = true;
for r in msg.get_requests() {
match r.get_cmd_type() {
CmdType::Get | CmdType::Snap | CmdType::ReadIndex => (),
_ => read_only = false,
}
}
let allow_replica_read = read_only && msg.get_header().get_replica_read();
let flags = WriteBatchFlags::from_bits_check(msg.get_header().get_flags());
let allow_stale_read = read_only && flags.contains(WriteBatchFlags::STALE_READ);
if !self.fsm.peer.is_leader()
&& !is_read_index_request
&& !allow_replica_read
&& !allow_stale_read
{
self.ctx.raft_metrics.invalid_proposal.not_leader += 1;
let leader = self.fsm.peer.get_peer_from_cache(leader_id);
self.fsm.reset_hibernate_state(GroupState::Chaos);
self.register_raft_base_tick();
return Err(Error::NotLeader(region_id, leader));
}
// peer_id must be the same as peer's.
if let Err(e) = util::check_peer_id(msg, self.fsm.peer.peer_id()) {
self.ctx.raft_metrics.invalid_proposal.mismatch_peer_id += 1;
return Err(e);
}
// check whether the peer is initialized.
if !self.fsm.peer.is_initialized() {
self.ctx
.raft_metrics
.invalid_proposal
.region_not_initialized += 1;
return Err(Error::RegionNotInitialized(region_id));
}
        // If the peer is applying a snapshot, it may drop some outgoing messages, which could
        // make clients wait for responses until they time out.
if self.fsm.peer.is_handling_snapshot() {
self.ctx.raft_metrics.invalid_proposal.is_applying_snapshot += 1;
// TODO: replace to a more suitable error.
return Err(Error::Other(box_err!(
"{} peer is applying snapshot",
self.fsm.peer.tag
)));
}
// Check whether the term is stale.
if let Err(e) = util::check_term(msg, self.fsm.peer.term()) {
self.ctx.raft_metrics.invalid_proposal.stale_command += 1;
return Err(e);
}
match util::check_region_epoch(msg, self.fsm.peer.region(), true) {
Err(Error::EpochNotMatch(m, mut new_regions)) => {
                // Attach the regions which might have been split from the current region. It
                // doesn't matter if a region was not actually split from the current region. If
                // the region meta received by the TiKV driver is newer than the meta cached in
                // the driver, the cached meta is updated.
let requested_version = msg.get_header().get_region_epoch().version;
self.collect_sibling_region(requested_version, &mut new_regions);
self.ctx.raft_metrics.invalid_proposal.epoch_not_match += 1;
Err(Error::EpochNotMatch(m, new_regions))
}
Err(e) => Err(e),
Ok(()) => Ok(None),
}
}
    /// Propose batched raft commands (if any) first, then propose the given raft command.
fn propose_raft_command(
&mut self,
msg: RaftCmdRequest,
cb: Callback<EK::Snapshot>,
diskfullopt: DiskFullOpt,
) {
if let Some((request, callback)) =
self.fsm.batch_req_builder.build(&mut self.ctx.raft_metrics)
{
self.propose_raft_command_internal(request, callback, DiskFullOpt::NotAllowedOnFull);
}
self.propose_raft_command_internal(msg, cb, diskfullopt);
}
/// Propose the raft command directly.
/// Note that this function introduces a reorder between this command and batched commands.
fn propose_raft_command_internal(
&mut self,
mut msg: RaftCmdRequest,
cb: Callback<EK::Snapshot>,
diskfullopt: DiskFullOpt,
) {
if self.fsm.peer.pending_remove {
apply::notify_req_region_removed(self.region_id(), cb);
return;
}
if self.ctx.raft_metrics.waterfall_metrics {
if let Some(request_times) = cb.get_request_times() {
let now = TiInstant::now();
for t in request_times {
self.ctx
.raft_metrics
.wf_batch_wait
.observe(duration_to_sec(now.saturating_duration_since(*t)));
}
}
}
match self.pre_propose_raft_command(&msg) {
Ok(Some(resp)) => {
cb.invoke_with_response(resp);
return;
}
Err(e) => {
debug!(
"failed to propose";
"region_id" => self.region_id(),
"peer_id" => self.fsm.peer_id(),
"message" => ?msg,
"err" => %e,
);
cb.invoke_with_response(new_error(e));
return;
}
_ => (),
}
if let Err(e) = self.check_merge_proposal(&mut msg) {
warn!(
"failed to propose merge";
"region_id" => self.region_id(),
"peer_id" => self.fsm.peer_id(),
"message" => ?msg,
"err" => %e,
"error_code" => %e.error_code(),
);
cb.invoke_with_response(new_error(e));
return;
}
        // Note:
        // The peer that is being checked is a leader. It might step down to be a follower later. It
        // doesn't matter whether the peer is a leader or not. If it's not a leader, the proposed
        // command's log entry can't be committed.
let mut resp = RaftCmdResponse::default();
let term = self.fsm.peer.term();
bind_term(&mut resp, term);
if self.fsm.peer.propose(self.ctx, cb, msg, resp, diskfullopt) {
self.fsm.has_ready = true;
}
if self.fsm.peer.should_wake_up {
self.reset_raft_tick(GroupState::Ordered);
}
self.register_pd_heartbeat_tick();
        // TODO: add a timeout. If the command is not applied within the timeout,
        // we will call the callback with a timeout error.
}
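    /// Collects sibling regions to attach to an `EpochNotMatch` response so
    /// the client can refresh its region cache in one round trip.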
fn collect_sibling_region(&self, requested_version: u64, regions: &mut Vec<Region>) {
let mut max_version = self.fsm.peer.region().get_region_epoch().version;
if requested_version >= max_version {
// Our information is stale.
return;
}
// Current region is included in the vec.
let mut collect_cnt = max_version - requested_version;
let anchor = Excluded(enc_end_key(self.fsm.peer.region()));
let meta = self.ctx.store_meta.lock().unwrap();
let mut ranges = if self.ctx.cfg.right_derive_when_split {
meta.region_ranges.range((Unbounded::<Vec<u8>>, anchor))
} else {
meta.region_ranges.range((anchor, Unbounded::<Vec<u8>>))
};
for _ in 0..MAX_REGIONS_IN_ERROR {
let res = if self.ctx.cfg.right_derive_when_split {
ranges.next_back()
} else {
ranges.next()
};
if let Some((_, id)) = res {
let r = &meta.regions[id];
collect_cnt -= 1;
// For example, A is split into B, A, and then B is split into C, B.
if r.get_region_epoch().version >= max_version {
// It doesn't matter if it's a false positive, as it's limited by MAX_REGIONS_IN_ERROR.
collect_cnt += r.get_region_epoch().version - max_version;
max_version = r.get_region_epoch().version;
}
regions.push(r.to_owned());
if collect_cnt == 0 {
return;
}
} else {
return;
}
}
}
fn register_raft_gc_log_tick(&mut self) {
self.schedule_tick(PeerTick::RaftLogGc)
}
#[allow(clippy::if_same_then_else)]
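    /// Picks a compact index based on applied/replicated progress and log
    /// size, then proposes a `CompactLog` command to GC stale Raft logs.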
fn on_raft_gc_log_tick(&mut self, force_compact: bool) {
if !self.fsm.peer.is_leader() {
            // `compact_cache_to` is called during apply, so there is no need to call `compact_to` here;
            // snapshot generation has already been cancelled when the role becomes follower.
return;
}
if !self.fsm.peer.get_store().is_cache_empty() || !self.ctx.cfg.hibernate_regions {
self.register_raft_gc_log_tick();
}
fail_point!("on_raft_log_gc_tick_1", self.fsm.peer_id() == 1, |_| {});
fail_point!("on_raft_gc_log_tick", |_| {});
debug_assert!(!self.fsm.stopped);
        // As leader, we would not keep caches for the peers that didn't respond to heartbeats in
        // the last few seconds. That probably happens because another TiKV is down. In this case,
        // if we do not clean up the cache, it may keep growing.
let drop_cache_duration =
self.ctx.cfg.raft_heartbeat_interval() + self.ctx.cfg.raft_entry_cache_life_time.0;
let cache_alive_limit = Instant::now() - drop_cache_duration;
        // The leader will replicate the compact log command to followers.
        // If we used the current replicated_index (like 10) as the compact index, then
        // when we replicate this log, the newest replicated_index becomes 11,
        // but we have only compacted the log to 10, not 11. At that time,
        // the first index is 10 and replicated_index is 11, leaving an extra log,
        // and we would compact again with compact index 11, and so on in cycles...
// So we introduce a threshold, if replicated index - first index > threshold,
// we will try to compact log.
// raft log entries[..............................................]
// ^ ^
// |-----------------threshold------------ |
// first_index replicated_index
// `alive_cache_idx` is the smallest `replicated_index` of healthy up nodes.
// `alive_cache_idx` is only used to gc cache.
let applied_idx = self.fsm.peer.get_store().applied_index();
let truncated_idx = self.fsm.peer.get_store().truncated_index();
let last_idx = self.fsm.peer.get_store().last_index();
let (mut replicated_idx, mut alive_cache_idx) = (last_idx, last_idx);
for (peer_id, p) in self.fsm.peer.raft_group.raft.prs().iter() {
if replicated_idx > p.matched {
replicated_idx = p.matched;
}
if let Some(last_heartbeat) = self.fsm.peer.peer_heartbeats.get(peer_id) {
if alive_cache_idx > p.matched
&& p.matched >= truncated_idx
&& *last_heartbeat > cache_alive_limit
{
alive_cache_idx = p.matched;
}
}
}
// When an election happened or a new peer is added, replicated_idx can be 0.
if replicated_idx > 0 {
assert!(
last_idx >= replicated_idx,
"expect last index {} >= replicated index {}",
last_idx,
replicated_idx
);
REGION_MAX_LOG_LAG.observe((last_idx - replicated_idx) as f64);
}
self.fsm
.peer
.mut_store()
.maybe_gc_cache(alive_cache_idx, applied_idx);
if needs_evict_entry_cache(self.ctx.cfg.evict_cache_on_memory_ratio) {
self.fsm.peer.mut_store().evict_cache(true);
if !self.fsm.peer.get_store().cache_is_empty() {
self.register_entry_cache_evict_tick();
}
}
let mut total_gc_logs = 0;
let first_idx = self.fsm.peer.get_store().first_index();
let mut compact_idx = if force_compact
// Too many logs between applied index and first index.
|| (applied_idx > first_idx && applied_idx - first_idx >= self.ctx.cfg.raft_log_gc_count_limit)
            // Raft log size exceeds the limit.
|| (self.fsm.peer.raft_log_size_hint >= self.ctx.cfg.raft_log_gc_size_limit.0)
{
std::cmp::max(first_idx + (last_idx - first_idx) / 4, replicated_idx)
} else if replicated_idx < first_idx || last_idx - first_idx < 3 {
// In the current implementation one compaction can't delete all stale Raft logs.
// There will be at least 3 entries left after one compaction:
// |------------- entries needs to be compacted ----------|
// [entries...][the entry at `compact_idx`][the last entry][new compaction entry]
// |-------------------- entries will be left ----------------------|
self.ctx.raft_metrics.raft_log_gc_skipped.reserve_log += 1;
return;
} else if replicated_idx - first_idx < self.ctx.cfg.raft_log_gc_threshold
&& self.fsm.skip_gc_raft_log_ticks < self.ctx.cfg.raft_log_reserve_max_ticks
{
self.ctx.raft_metrics.raft_log_gc_skipped.threshold_limit += 1;
// Logs will only be kept `max_ticks` * `raft_log_gc_tick_interval`.
self.fsm.skip_gc_raft_log_ticks += 1;
self.register_raft_gc_log_tick();
return;
} else {
replicated_idx
};
assert!(compact_idx >= first_idx);
        // Have no idea why we subtract 1 here, but the original code did this by magic.
compact_idx -= 1;
if compact_idx < first_idx {
// In case compact_idx == first_idx before subtraction.
self.ctx
.raft_metrics
.raft_log_gc_skipped
.compact_idx_too_small += 1;
return;
}
total_gc_logs += compact_idx - first_idx;
// Create a compact log request and notify directly.
let region_id = self.fsm.peer.region().get_id();
let peer = self.fsm.peer.peer.clone();
let term = self.fsm.peer.get_index_term(compact_idx);
let request = new_compact_log_request(region_id, peer, compact_idx, term);
self.propose_raft_command_internal(
request,
Callback::None,
DiskFullOpt::AllowedOnAlmostFull,
);
self.fsm.skip_gc_raft_log_ticks = 0;
self.register_raft_gc_log_tick();
PEER_GC_RAFT_LOG_COUNTER.inc_by(total_gc_logs);
}
fn register_entry_cache_evict_tick(&mut self) {
self.schedule_tick(PeerTick::EntryCacheEvict)
}
fn on_entry_cache_evict_tick(&mut self) {
fail_point!("on_entry_cache_evict_tick", |_| {});
if needs_evict_entry_cache(self.ctx.cfg.evict_cache_on_memory_ratio) {
self.fsm.peer.mut_store().evict_cache(true);
}
let mut _usage = 0;
if memory_usage_reaches_high_water(&mut _usage)
&& !self.fsm.peer.get_store().cache_is_empty()
{
self.register_entry_cache_evict_tick();
}
}
fn register_check_leader_lease_tick(&mut self) {
self.schedule_tick(PeerTick::CheckLeaderLease)
}
fn on_check_leader_lease_tick(&mut self) {
if !self.fsm.peer.is_leader() || self.fsm.hibernate_state.group_state() == GroupState::Idle
{
return;
}
self.try_renew_leader_lease();
self.register_check_leader_lease_tick();
}
fn register_split_region_check_tick(&mut self) {
self.schedule_tick(PeerTick::SplitRegionCheck)
}
#[inline]
fn region_split_skip_max_count(&self) -> usize {
fail_point!("region_split_skip_max_count", |_| { usize::max_value() });
REGION_SPLIT_SKIP_MAX_COUNT
}
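    /// Schedules a split check scan when the region has accumulated enough
    /// size changes, unless splitting is currently undesirable (e.g. import
    /// mode or an in-flight snapshot).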
fn on_split_region_check_tick(&mut self) {
if !self.fsm.peer.is_leader() {
return;
}
        // On restart, `has_calculated_region_size` will be false. The split check will first
        // check the region size, and then check whether the region should split. This
        // should work even if we change the region max size.
        // If the peer says the approximate size should be updated, update the region size and
        // check whether the region should split.
        // We assume that `has_calculated_region_size` is only set to true when an accurate
        // value is received from the split-check thread.
if self.fsm.peer.has_calculated_region_size
&& self.fsm.peer.compaction_declined_bytes < self.ctx.cfg.region_split_check_diff.0
&& self.fsm.peer.size_diff_hint < self.ctx.cfg.region_split_check_diff.0
{
return;
}
fail_point!("on_split_region_check_tick");
self.register_split_region_check_tick();
// To avoid frequent scan, we only add new scan tasks if all previous tasks
// have finished.
// TODO: check whether a gc progress has been started.
if self.ctx.split_check_scheduler.is_busy() {
return;
}
        // When Lightning or BR is importing data into TiKV, their ingest requests may fail
        // because the region epoch does not match. So we want TiKV not to check region size or
        // split regions during importing.
if self.ctx.importer.get_mode() == SwitchMode::Import {
return;
}
        // Bulk inserting too fast may cause the snapshot to become stale very soon; in the worst
        // case it becomes stale before sending. So while a snapshot is generating or sending, skip
        // the split check at most 3 times. There is a trade-off between region size and snapshot
        // success rate. The split check is triggered every 10 seconds; if a snapshot can't be
        // generated within 30 seconds, it might just be too large to generate. Splitting the
        // region into smaller ones can help generation. Check issue 330 for more info.
if self.fsm.peer.get_store().is_generating_snapshot()
&& self.fsm.skip_split_count < self.region_split_skip_max_count()
{
self.fsm.skip_split_count += 1;
return;
}
self.fsm.skip_split_count = 0;
let task = SplitCheckTask::split_check(self.region().clone(), true, CheckPolicy::Scan);
if let Err(e) = self.ctx.split_check_scheduler.schedule(task) {
error!(
"failed to schedule split check";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"err" => %e,
);
return;
}
self.fsm.peer.size_diff_hint = 0;
self.fsm.peer.compaction_declined_bytes = 0;
}
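    /// Validates a split request and asks PD for new region ids through an
    /// `AskBatchSplit` task.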
fn on_prepare_split_region(
&mut self,
region_epoch: metapb::RegionEpoch,
split_keys: Vec<Vec<u8>>,
cb: Callback<EK::Snapshot>,
source: &str,
) {
info!(
"on split";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"split_keys" => %KeysInfoFormatter(split_keys.iter()),
"source" => source,
);
if let Err(e) = self.validate_split_region(®ion_epoch, &split_keys) {
cb.invoke_with_response(new_error(e));
return;
}
let region = self.fsm.peer.region();
let task = PdTask::AskBatchSplit {
region: region.clone(),
split_keys,
peer: self.fsm.peer.peer.clone(),
right_derive: self.ctx.cfg.right_derive_when_split,
callback: cb,
};
if let Err(ScheduleError::Stopped(t)) = self.ctx.pd_scheduler.schedule(task) {
error!(
"failed to notify pd to split: Stopped";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
match t {
PdTask::AskBatchSplit { callback, .. } => {
callback.invoke_with_response(new_error(box_err!(
"{} failed to split: Stopped",
self.fsm.peer.tag
)));
}
_ => unreachable!(),
}
}
}
fn validate_split_region(
&mut self,
epoch: &metapb::RegionEpoch,
split_keys: &[Vec<u8>],
) -> Result<()> {
if split_keys.is_empty() {
error!(
"no split key is specified.";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
return Err(box_err!("{} no split key is specified.", self.fsm.peer.tag));
}
for key in split_keys {
if key.is_empty() {
error!(
"split key should not be empty!!!";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
return Err(box_err!(
"{} split key should not be empty",
self.fsm.peer.tag
));
}
}
if !self.fsm.peer.is_leader() {
// region on this store is no longer leader, skipped.
info!(
"not leader, skip.";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
return Err(Error::NotLeader(
self.region_id(),
self.fsm.peer.get_peer_from_cache(self.fsm.peer.leader_id()),
));
}
let region = self.fsm.peer.region();
let latest_epoch = region.get_region_epoch();
        // This is a little different from `check_region_epoch` in the region split case.
        // Here we just need to check `version` because `conf_ver` will be updated
        // to the latest value of the peer and then sent to PD.
if latest_epoch.get_version() != epoch.get_version() {
info!(
"epoch changed, retry later";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"prev_epoch" => ?region.get_region_epoch(),
"epoch" => ?epoch,
);
return Err(Error::EpochNotMatch(
format!(
"{} epoch changed {:?} != {:?}, retry later",
self.fsm.peer.tag, latest_epoch, epoch
),
vec![region.to_owned()],
));
}
Ok(())
}
fn on_approximate_region_size(&mut self, size: u64) {
self.fsm.peer.approximate_size = Some(size);
self.fsm.peer.has_calculated_region_size = true;
self.register_split_region_check_tick();
self.register_pd_heartbeat_tick();
fail_point!("on_approximate_region_size");
}
fn on_approximate_region_keys(&mut self, keys: u64) {
self.fsm.peer.approximate_keys = Some(keys);
self.register_split_region_check_tick();
self.register_pd_heartbeat_tick();
}
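    /// Accumulates the bytes declined by compaction and registers a split
    /// check so the region size can be re-evaluated.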
fn on_compaction_declined_bytes(&mut self, declined_bytes: u64) {
self.fsm.peer.compaction_declined_bytes += declined_bytes;
if self.fsm.peer.compaction_declined_bytes >= self.ctx.cfg.region_split_check_diff.0 {
UPDATE_REGION_SIZE_BY_COMPACTION_COUNTER.inc();
}
self.register_split_region_check_tick();
}
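    /// Schedules a half-split check (finding the region's middle key) if this
    /// peer is still a valid leader for the given epoch.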
fn on_schedule_half_split_region(
&mut self,
region_epoch: &metapb::RegionEpoch,
policy: CheckPolicy,
source: &str,
) {
info!(
"on half split";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"policy" => ?policy,
"source" => source,
);
if !self.fsm.peer.is_leader() {
// region on this store is no longer leader, skipped.
warn!(
"not leader, skip";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
return;
}
let region = self.fsm.peer.region();
if util::is_epoch_stale(region_epoch, region.get_region_epoch()) {
warn!(
"receive a stale halfsplit message";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
return;
}
let task = SplitCheckTask::split_check(region.clone(), false, policy);
if let Err(e) = self.ctx.split_check_scheduler.schedule(task) {
error!(
"failed to schedule split check";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"err" => %e,
);
}
}
fn on_pd_heartbeat_tick(&mut self) {
if !self.ctx.cfg.hibernate_regions {
self.register_pd_heartbeat_tick();
}
self.fsm.peer.check_peers();
if !self.fsm.peer.is_leader() {
return;
}
self.fsm.peer.heartbeat_pd(self.ctx);
if self.ctx.cfg.hibernate_regions && self.fsm.peer.replication_mode_need_catch_up() {
self.register_pd_heartbeat_tick();
}
}
fn register_pd_heartbeat_tick(&mut self) {
self.schedule_tick(PeerTick::PdHeartbeat)
}
fn on_check_peer_stale_state_tick(&mut self) {
if self.fsm.peer.pending_remove {
return;
}
self.register_check_peer_stale_state_tick();
if self.fsm.peer.is_handling_snapshot() || self.fsm.peer.has_pending_snapshot() {
return;
}
if self.ctx.cfg.hibernate_regions {
let group_state = self.fsm.hibernate_state.group_state();
if group_state == GroupState::Idle {
self.fsm.peer.ping();
if !self.fsm.peer.is_leader() {
// If leader is able to receive message but can't send out any,
// follower should be able to start an election.
self.fsm.reset_hibernate_state(GroupState::PreChaos);
} else {
self.fsm.has_ready = true;
// Schedule a pd heartbeat to discover down and pending peer when
// hibernate_regions is enabled.
self.register_pd_heartbeat_tick();
}
} else if group_state == GroupState::PreChaos {
self.fsm.reset_hibernate_state(GroupState::Chaos);
} else if group_state == GroupState::Chaos {
                // Register the tick if it isn't registered yet. Only when it fails to receive a
                // ping from the leader after two stale checks can a follower actually tick.
self.register_raft_base_tick();
}
}
        // If this peer detects that the leader has been missing for a long time,
        // it should consider itself a stale peer which has been removed from
        // the original cluster.
        // This most likely happens in the following scenario:
        // At first, there are three peers A, B, and C in the cluster, and A is the leader.
        // Peer B goes down. Then A adds D, E, and F into the cluster.
        // Peer D becomes leader of the new cluster, and then removes peers A, B, and C.
        // After all these peers going in and out, the cluster now has peers D, E, and F.
        // If peer B comes back up at this moment, it still thinks it is part of the cluster
        // and has peers A and C. However, it cannot reach A or C since they have been removed
        // from the cluster or probably destroyed.
        // Meanwhile, D, E, and F cannot reach B, since it's no longer in the cluster.
        // In this case, peer B would notice that the leader has been missing for a long time,
        // and it would check with PD to confirm whether it's still a member of the cluster.
        // If not, it destroys itself as a stale peer which has already been removed.
let state = self.fsm.peer.check_stale_state(self.ctx);
fail_point!("peer_check_stale_state", state != StaleState::Valid, |_| {});
match state {
StaleState::Valid => (),
StaleState::LeaderMissing => {
warn!(
"leader missing longer than abnormal_leader_missing_duration";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"expect" => %self.ctx.cfg.abnormal_leader_missing_duration,
);
self.ctx
.raft_metrics
.leader_missing
.lock()
.unwrap()
.insert(self.region_id());
}
StaleState::ToValidate => {
// for peer B in case 1 above
warn!(
"leader missing longer than max_leader_missing_duration. \
To check with pd and other peers whether it's still valid";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"expect" => %self.ctx.cfg.max_leader_missing_duration,
);
self.fsm.peer.bcast_check_stale_peer_message(self.ctx);
let task = PdTask::ValidatePeer {
peer: self.fsm.peer.peer.clone(),
region: self.fsm.peer.region().clone(),
};
if let Err(e) = self.ctx.pd_scheduler.schedule(task) {
error!(
"failed to notify pd";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"err" => %e,
)
}
}
}
}
fn register_check_peer_stale_state_tick(&mut self) {
self.schedule_tick(PeerTick::CheckPeerStaleState)
}
}
impl<'a, EK, ER, T: Transport> PeerFsmDelegate<'a, EK, ER, T>
where
EK: KvEngine,
ER: RaftEngine,
{
fn on_ready_compute_hash(
&mut self,
region: metapb::Region,
index: u64,
context: Vec<u8>,
snap: EK::Snapshot,
) {
self.fsm.peer.consistency_state.last_check_time = Instant::now();
let task = ConsistencyCheckTask::compute_hash(region, index, context, snap);
info!(
"schedule compute hash task";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"task" => %task,
);
if let Err(e) = self.ctx.consistency_check_scheduler.schedule(task) {
error!(
"schedule failed";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"err" => %e,
);
}
}
fn on_ready_verify_hash(
&mut self,
expected_index: u64,
context: Vec<u8>,
expected_hash: Vec<u8>,
) {
self.verify_and_store_hash(expected_index, context, expected_hash);
}
fn on_hash_computed(&mut self, index: u64, context: Vec<u8>, hash: Vec<u8>) {
if !self.verify_and_store_hash(index, context, hash) {
return;
}
let req = new_verify_hash_request(
self.region_id(),
self.fsm.peer.peer.clone(),
&self.fsm.peer.consistency_state,
);
self.propose_raft_command_internal(req, Callback::None, DiskFullOpt::NotAllowedOnFull);
}
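    /// Accounts ingested SSTs into the approximate region size/keys and
    /// forces a fresh size calculation, since ingested data may overlap
    /// existing data in the engine.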
fn on_ingest_sst_result(&mut self, ssts: Vec<SSTMetaInfo>) {
let mut size = 0;
let mut keys = 0;
for sst in &ssts {
size += sst.total_bytes;
keys += sst.total_kvs;
}
self.fsm.peer.approximate_size =
Some(self.fsm.peer.approximate_size.unwrap_or_default() + size);
self.fsm.peer.approximate_keys =
Some(self.fsm.peer.approximate_keys.unwrap_or_default() + keys);
// The ingested file may be overlapped with the data in engine, so we need to check it
// again to get the accurate value.
self.fsm.peer.has_calculated_region_size = false;
if self.fsm.peer.is_leader() {
self.on_pd_heartbeat_tick();
self.register_split_region_check_tick();
}
}
fn on_transfer_leader(&mut self, term: u64) {
// If the term has changed between proposing and executing the TransferLeader request,
// ignore it because this request may be stale.
if term != self.fsm.peer.term() {
return;
}
// As the leader can propose the TransferLeader request successfully, the disk of
// the leader is probably not full.
self.fsm.peer.execute_transfer_leader(
self.ctx,
self.fsm.peer.leader_id(),
DiskUsage::Normal,
true,
);
self.fsm.has_ready = true;
}
    /// Verifies the hash and stores it in the state. Returns true if the hash has been stored
    /// successfully.
// TODO: Consider context in the function.
fn verify_and_store_hash(
&mut self,
expected_index: u64,
_context: Vec<u8>,
expected_hash: Vec<u8>,
) -> bool {
if expected_index < self.fsm.peer.consistency_state.index {
REGION_HASH_COUNTER.verify.miss.inc();
warn!(
"has scheduled a new hash, skip.";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"index" => self.fsm.peer.consistency_state.index,
"expected_index" => expected_index,
);
return false;
}
if self.fsm.peer.consistency_state.index == expected_index {
if self.fsm.peer.consistency_state.hash.is_empty() {
warn!(
"duplicated consistency check detected, skip.";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
);
return false;
}
if self.fsm.peer.consistency_state.hash != expected_hash {
panic!(
"{} hash at {} not correct, want \"{}\", got \"{}\"!!!",
self.fsm.peer.tag,
self.fsm.peer.consistency_state.index,
escape(&expected_hash),
escape(&self.fsm.peer.consistency_state.hash)
);
}
info!(
"consistency check pass.";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"index" => self.fsm.peer.consistency_state.index
);
REGION_HASH_COUNTER.verify.matched.inc();
self.fsm.peer.consistency_state.hash = vec![];
return false;
}
if self.fsm.peer.consistency_state.index != INVALID_INDEX
&& !self.fsm.peer.consistency_state.hash.is_empty()
{
            // Maybe computing is too slow, or the computed result was dropped because the channel
            // was full. If computing is too slow, the miss count will be increased twice.
REGION_HASH_COUNTER.verify.miss.inc();
warn!(
"hash belongs to wrong index, skip.";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"index" => self.fsm.peer.consistency_state.index,
"expected_index" => expected_index,
);
}
info!(
"save hash for consistency check later.";
"region_id" => self.fsm.region_id(),
"peer_id" => self.fsm.peer_id(),
"index" => expected_index,
);
self.fsm.peer.consistency_state.index = expected_index;
self.fsm.peer.consistency_state.hash = expected_hash;
true
}
}
/// Checks the merge target, and returns whether the source peer should be destroyed and
/// whether the source peer was merged to this target peer.
///
/// It returns (`can_destroy`, `merge_to_this_peer`).
///
/// `can_destroy` is true when a network isolation causes a follower of a merge target
/// Region to fall behind on its logs and then receive a snapshot whose epoch version is from after the merge.
///
/// `merge_to_this_peer` is true when `can_destroy` is true and the source peer is merged to this target peer.
pub fn maybe_destroy_source(
meta: &StoreMeta,
target_region_id: u64,
target_peer_id: u64,
source_region_id: u64,
region_epoch: RegionEpoch,
) -> (bool, bool) {
if let Some(merge_targets) = meta.pending_merge_targets.get(&target_region_id) {
if let Some(target_region) = merge_targets.get(&source_region_id) {
info!(
"[region {}] checking source {} epoch: {:?}, merge target epoch: {:?}",
target_region_id,
source_region_id,
region_epoch,
target_region.get_region_epoch(),
);
// The target peer will move on, namely, it will apply a snapshot generated after merge,
// so destroy source peer.
if region_epoch.get_version() > target_region.get_region_epoch().get_version() {
return (
true,
target_peer_id
== util::find_peer(target_region, meta.store_id.unwrap())
.unwrap()
.get_id(),
);
}
            // Wait till the target peer has caught up its logs; the source peer will be destroyed at that time.
return (false, false);
}
}
(false, false)
}
pub fn new_read_index_request(
region_id: u64,
region_epoch: RegionEpoch,
peer: metapb::Peer,
) -> RaftCmdRequest {
let mut request = RaftCmdRequest::default();
request.mut_header().set_region_id(region_id);
request.mut_header().set_region_epoch(region_epoch);
request.mut_header().set_peer(peer);
let mut cmd = Request::default();
cmd.set_cmd_type(CmdType::ReadIndex);
request
}
pub fn new_admin_request(region_id: u64, peer: metapb::Peer) -> RaftCmdRequest {
let mut request = RaftCmdRequest::default();
request.mut_header().set_region_id(region_id);
request.mut_header().set_peer(peer);
request
}
fn new_verify_hash_request(
region_id: u64,
peer: metapb::Peer,
state: &ConsistencyState,
) -> RaftCmdRequest {
let mut request = new_admin_request(region_id, peer);
let mut admin = AdminRequest::default();
admin.set_cmd_type(AdminCmdType::VerifyHash);
admin.mut_verify_hash().set_index(state.index);
admin.mut_verify_hash().set_context(state.context.clone());
admin.mut_verify_hash().set_hash(state.hash.clone());
request.set_admin_request(admin);
request
}
fn new_compact_log_request(
region_id: u64,
peer: metapb::Peer,
compact_index: u64,
compact_term: u64,
) -> RaftCmdRequest {
let mut request = new_admin_request(region_id, peer);
let mut admin = AdminRequest::default();
admin.set_cmd_type(AdminCmdType::CompactLog);
admin.mut_compact_log().set_compact_index(compact_index);
admin.mut_compact_log().set_compact_term(compact_term);
request.set_admin_request(admin);
request
}
impl<'a, EK, ER, T: Transport> PeerFsmDelegate<'a, EK, ER, T>
where
EK: KvEngine,
ER: RaftEngine,
{
    // Handle status commands here to separate the logic; maybe we can move it
    // to another file later.
    // Unlike other commands (write or admin), status commands only show the current
    // store status, so there is no need to handle them in the raft group.
fn execute_status_command(&mut self, request: &RaftCmdRequest) -> Result<RaftCmdResponse> {
let cmd_type = request.get_status_request().get_cmd_type();
let mut response = match cmd_type {
StatusCmdType::RegionLeader => self.execute_region_leader(),
StatusCmdType::RegionDetail => self.execute_region_detail(request),
StatusCmdType::InvalidStatus => {
Err(box_err!("{} invalid status command!", self.fsm.peer.tag))
}
}?;
response.set_cmd_type(cmd_type);
let mut resp = RaftCmdResponse::default();
resp.set_status_response(response);
// Bind peer current term here.
bind_term(&mut resp, self.fsm.peer.term());
Ok(resp)
}
fn execute_region_leader(&mut self) -> Result<StatusResponse> {
let mut resp = StatusResponse::default();
if let Some(leader) = self.fsm.peer.get_peer_from_cache(self.fsm.peer.leader_id()) {
resp.mut_region_leader().set_leader(leader);
}
Ok(resp)
}
fn execute_region_detail(&mut self, request: &RaftCmdRequest) -> Result<StatusResponse> {
if !self.fsm.peer.get_store().is_initialized() {
let region_id = request.get_header().get_region_id();
return Err(Error::RegionNotInitialized(region_id));
}
let mut resp = StatusResponse::default();
resp.mut_region_detail()
.set_region(self.fsm.peer.region().clone());
if let Some(leader) = self.fsm.peer.get_peer_from_cache(self.fsm.peer.leader_id()) {
resp.mut_region_detail().set_leader(leader);
}
Ok(resp)
}
}
impl<EK: KvEngine, ER: RaftEngine> AbstractPeer for PeerFsm<EK, ER> {
fn meta_peer(&self) -> &metapb::Peer {
&self.peer.peer
}
fn group_state(&self) -> GroupState {
self.hibernate_state.group_state()
}
fn region(&self) -> &metapb::Region {
self.peer.raft_group.store().region()
}
fn apply_state(&self) -> &RaftApplyState {
self.peer.raft_group.store().apply_state()
}
fn raft_status(&self) -> raft::Status<'_> {
self.peer.raft_group.status()
}
fn raft_commit_index(&self) -> u64 {
self.peer.raft_group.store().commit_index()
}
fn pending_merge_state(&self) -> Option<&MergeState> {
self.peer.pending_merge_state.as_ref()
}
}
mod memtrace {
use super::*;
use memory_trace_macros::MemoryTraceHelper;
/// Heap size for Raft internal `ReadOnly`.
#[derive(MemoryTraceHelper, Default, Debug)]
pub struct PeerMemoryTrace {
/// `ReadOnly` memory usage in Raft groups.
pub read_only: usize,
/// `Progress` memory usage in Raft groups.
pub progress: usize,
/// `Proposal` memory usage for peers.
pub proposals: usize,
pub rest: usize,
}
impl<EK, ER> PeerFsm<EK, ER>
where
EK: KvEngine,
ER: RaftEngine,
{
pub fn raft_read_size(&self) -> usize {
let msg_size = mem::size_of::<raft::eraftpb::Message>();
let raft = &self.peer.raft_group.raft;
            // We use a Uuid for each read request.
let mut size = raft.read_states.len() * (mem::size_of::<ReadState>() + 16);
size += raft.read_only.read_index_queue.len() * 16;
            // Every request has at least a header, which should be at least 8 bytes.
size + raft.read_only.pending_read_index.len() * (16 + msg_size)
}
pub fn raft_progress_size(&self) -> usize {
let peer_cnt = self.peer.region().get_peers().len();
mem::size_of::<Progress>() * peer_cnt * 6 / 5
+ self.peer.raft_group.raft.inflight_buffers_size()
}
}
}
#[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use crate::store::local_metrics::RaftMetrics;
use crate::store::msg::{Callback, ExtCallback, RaftCommand};
use engine_test::kv::KvTestEngine;
use kvproto::raft_cmdpb::{
AdminRequest, CmdType, PutRequest, RaftCmdRequest, RaftCmdResponse, Request, Response,
StatusRequest,
};
use protobuf::Message;
use tikv_util::config::ReadableSize;
use super::*;
#[test]
fn test_batch_raft_cmd_request_builder() {
let mut cfg = Config::default();
cfg.raft_entry_max_size = ReadableSize(1000);
let mut builder = BatchRaftCmdRequestBuilder::<KvTestEngine>::new();
let mut q = Request::default();
let mut metric = RaftMetrics::new(true);
let mut req = RaftCmdRequest::default();
req.set_admin_request(AdminRequest::default());
assert!(!builder.can_batch(&cfg, &req, 0));
let mut req = RaftCmdRequest::default();
req.set_status_request(StatusRequest::default());
assert!(!builder.can_batch(&cfg, &req, 0));
let mut req = RaftCmdRequest::default();
let mut put = PutRequest::default();
put.set_key(b"aaaa".to_vec());
put.set_value(b"bbbb".to_vec());
q.set_cmd_type(CmdType::Put);
q.set_put(put);
req.mut_requests().push(q.clone());
let _ = q.take_put();
let req_size = req.compute_size();
assert!(builder.can_batch(&cfg, &req, req_size));
let mut req = RaftCmdRequest::default();
q.set_cmd_type(CmdType::Snap);
req.mut_requests().push(q.clone());
let mut put = PutRequest::default();
put.set_key(b"aaaa".to_vec());
put.set_value(b"bbbb".to_vec());
q.set_cmd_type(CmdType::Put);
q.set_put(put);
req.mut_requests().push(q.clone());
let req_size = req.compute_size();
assert!(!builder.can_batch(&cfg, &req, req_size));
let mut req = RaftCmdRequest::default();
let mut put = PutRequest::default();
put.set_key(b"aaaa".to_vec());
put.set_value(vec![8_u8; 2000]);
q.set_cmd_type(CmdType::Put);
q.set_put(put);
req.mut_requests().push(q.clone());
let req_size = req.compute_size();
assert!(!builder.can_batch(&cfg, &req, req_size));
// Check batch callback
let mut req = RaftCmdRequest::default();
let mut put = PutRequest::default();
put.set_key(b"aaaa".to_vec());
put.set_value(vec![8_u8; 20]);
q.set_cmd_type(CmdType::Put);
q.set_put(put);
req.mut_requests().push(q);
let mut cbs_flags = vec![];
let mut proposed_cbs_flags = vec![];
let mut committed_cbs_flags = vec![];
let mut response = RaftCmdResponse::default();
for i in 0..10 {
let flag = Arc::new(AtomicBool::new(false));
cbs_flags.push(flag.clone());
// Some commands don't have proposed_cb.
let proposed_cb: Option<ExtCallback> = if i % 2 == 0 {
let proposed_flag = Arc::new(AtomicBool::new(false));
proposed_cbs_flags.push(proposed_flag.clone());
Some(Box::new(move || {
proposed_flag.store(true, Ordering::Release);
}))
} else {
None
};
let committed_cb: Option<ExtCallback> = if i % 3 == 0 {
let committed_flag = Arc::new(AtomicBool::new(false));
committed_cbs_flags.push(committed_flag.clone());
Some(Box::new(move || {
committed_flag.store(true, Ordering::Release);
}))
} else {
None
};
let cb = Callback::write_ext(
Box::new(move |_resp| {
flag.store(true, Ordering::Release);
}),
proposed_cb,
committed_cb,
);
response.mut_responses().push(Response::default());
let cmd = RaftCommand::new(req.clone(), cb);
builder.add(cmd, 100);
}
let (request, mut callback) = builder.build(&mut metric).unwrap();
callback.invoke_proposed();
for flag in proposed_cbs_flags {
assert!(flag.load(Ordering::Acquire));
}
callback.invoke_committed();
for flag in committed_cbs_flags {
assert!(flag.load(Ordering::Acquire));
}
assert_eq!(10, request.get_requests().len());
callback.invoke_with_response(response);
for flag in cbs_flags {
assert!(flag.load(Ordering::Acquire));
}
}
}
| 40.344139 | 156 | 0.533321 |
61fda699d811b5f1be90edea0056ca6e7a4f7fdf | 4,384 | use crate::builtins::bytes::PyBytesRef;
use crate::builtins::code::PyCode;
use crate::builtins::module::PyModuleRef;
use crate::builtins::pystr::{self, PyStr, PyStrRef};
use crate::import;
use crate::pyobject::{BorrowValue, ItemProtocol, PyObjectRef, PyResult, PyValue};
use crate::vm::VirtualMachine;
#[cfg(feature = "threading")]
mod lock {
use crate::pyobject::PyResult;
use crate::stdlib::thread::RawRMutex;
use crate::vm::VirtualMachine;
pub(super) static IMP_LOCK: RawRMutex = RawRMutex::INIT;
pub(super) fn _imp_acquire_lock(_vm: &VirtualMachine) {
IMP_LOCK.lock()
}
pub(super) fn _imp_release_lock(vm: &VirtualMachine) -> PyResult<()> {
if !IMP_LOCK.is_locked() {
Err(vm.new_runtime_error("Global import lock not held".to_owned()))
} else {
unsafe { IMP_LOCK.unlock() };
Ok(())
}
}
pub(super) fn _imp_lock_held(_vm: &VirtualMachine) -> bool {
IMP_LOCK.is_locked()
}
}
#[cfg(not(feature = "threading"))]
mod lock {
use crate::vm::VirtualMachine;
pub(super) fn _imp_acquire_lock(_vm: &VirtualMachine) {}
pub(super) fn _imp_release_lock(_vm: &VirtualMachine) {}
pub(super) fn _imp_lock_held(_vm: &VirtualMachine) -> bool {
false
}
}
use lock::{_imp_acquire_lock, _imp_lock_held, _imp_release_lock};
fn _imp_extension_suffixes(vm: &VirtualMachine) -> PyResult {
Ok(vm.ctx.new_list(vec![]))
}
fn _imp_is_builtin(name: PyStrRef, vm: &VirtualMachine) -> bool {
vm.state.stdlib_inits.contains_key(name.borrow_value())
}
fn _imp_is_frozen(name: PyStrRef, vm: &VirtualMachine) -> bool {
vm.state.frozen.contains_key(name.borrow_value())
}
fn _imp_create_builtin(spec: PyObjectRef, vm: &VirtualMachine) -> PyResult {
let sys_modules = vm.get_attribute(vm.sys_module.clone(), "modules").unwrap();
let spec = vm.get_attribute(spec, "name")?;
let name = pystr::borrow_value(&spec);
if let Ok(module) = sys_modules.get_item(name, vm) {
Ok(module)
} else if let Some(make_module_func) = vm.state.stdlib_inits.get(name) {
Ok(make_module_func(vm))
} else {
Ok(vm.ctx.none())
}
}
fn _imp_exec_builtin(_mod: PyModuleRef) -> i32 {
    // TODO: Should we do something here?
0
}
fn _imp_get_frozen_object(name: PyStrRef, vm: &VirtualMachine) -> PyResult<PyCode> {
vm.state
.frozen
.get(name.borrow_value())
.map(|frozen| {
let mut frozen = frozen.code.clone();
frozen.source_path = PyStr::from(format!("frozen {}", name)).into_ref(vm);
PyCode::new(frozen)
})
.ok_or_else(|| vm.new_import_error(format!("No such frozen object named {}", name), name))
}
fn _imp_init_frozen(name: PyStrRef, vm: &VirtualMachine) -> PyResult {
import::import_frozen(vm, name.borrow_value())
}
fn _imp_is_frozen_package(name: PyStrRef, vm: &VirtualMachine) -> PyResult<bool> {
vm.state
.frozen
.get(name.borrow_value())
.map(|frozen| frozen.package)
.ok_or_else(|| vm.new_import_error(format!("No such frozen object named {}", name), name))
}
fn _imp_fix_co_filename(_code: PyObjectRef, _path: PyStrRef) {
// TODO:
}
fn _imp_source_hash(_key: u64, _source: PyBytesRef, vm: &VirtualMachine) -> PyResult {
// TODO:
Ok(vm.ctx.none())
}
pub fn make_module(vm: &VirtualMachine) -> PyObjectRef {
let ctx = &vm.ctx;
py_module!(vm, "_imp", {
"extension_suffixes" => named_function!(ctx, _imp, extension_suffixes),
"acquire_lock" => named_function!(ctx, _imp, acquire_lock),
"release_lock" => named_function!(ctx, _imp, release_lock),
"lock_held" => named_function!(ctx, _imp, lock_held),
"is_builtin" => named_function!(ctx, _imp, is_builtin),
"is_frozen" => named_function!(ctx, _imp, is_frozen),
"create_builtin" => named_function!(ctx, _imp, create_builtin),
"exec_builtin" => named_function!(ctx, _imp, exec_builtin),
"get_frozen_object" => named_function!(ctx, _imp, get_frozen_object),
"init_frozen" => named_function!(ctx, _imp, init_frozen),
"is_frozen_package" => named_function!(ctx, _imp, is_frozen_package),
"_fix_co_filename" => named_function!(ctx, _imp, fix_co_filename),
"source_hash" => named_function!(ctx, _imp, source_hash),
})
}
| 33.984496 | 98 | 0.655566 |
26117970f0b79bc611cfc3261c28ec2e68d1421a | 7,847 | //! A rank/select dictionary.
use std::fmt::Debug;
use std::ops::{Range, RangeBounds};
use buf_range::bounds_within;
use count::Count;
use find_nth::FindNth;
const WORD_SIZE: usize = 64;
const WORD_SIZE_2: usize = WORD_SIZE * WORD_SIZE;
/// A rank/select dictionary.
///
/// A sequence of `0`/`1` elements that supports counting the number of
/// `0`s/`1`s in any interval.
///
/// # Idea
/// For a bit array of $n$ elements there are only $n+1$ possible rank queries
/// and $n+1$ possible select queries, so precomputing them all in $O(n)$ time
/// achieves $O(1)$ query time in $3n+O(1)$ words[^1].
///
/// [^1]: The result of $\\mathtt{rank}\_0$ can be derived from that of
/// $\\mathtt{rank}\_1$, but the result of $\\mathtt{select}\_0$ cannot be
/// derived from that of $\\mathtt{select}\_1$.
///
/// However, structures such as a wavelet matrix may need 64 of these, so
/// saving space seems worthwhile; this implementation therefore uses a scheme
/// with $6n/w+O(1)$ words and $O(\\log(w))$ query time[^2].
///
/// [^2]: It is somewhat involved, so the naive approach might actually be
/// better; benchmark it.
///
/// For rank, we store prefix sums of the popcounts taken every $w$ bits
/// ($n/w$ words). The remainder within a word is handled with a word-sized
/// popcount[^3].
///
/// [^3]: The $O(\\log(w))$-time method is not implemented; it will be
/// considered if `.count_ones()` turns out to be slow in practice. Whether
/// this counts as $O(1)$ time is unclear; in the succinct data structure
/// literature it tends to be waved away with "it could be a table lookup
/// anyway...".
///
/// For select, separate structures are needed for `0` and `1`; only `1` is
/// described below, and `0` works mutatis mutandis.
/// First, record the position of every $w$-th occurrence of `1` (at most
/// $n/w$ words). A block between recorded positions is called "sparse" if its
/// width is at least $w^2$, and "dense" otherwise.
/// There are at most $n/w^2$ sparse blocks, so holding their occurrence
/// positions explicitly still fits in $n/w$ words. Within a dense block the
/// width is below $w^2$, so a per-query binary search is bounded by
/// $\\log(w)$ time.
///
/// # Complexity
/// $O(n)$ preprocess, $O(n/w)$ space, $O(\\log(w))$ query time.
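///
/// # Examples
///
/// A minimal usage sketch via the `Count` and `FindNth` traits implemented
/// below. The `use` paths are assumptions about the surrounding crate, so the
/// block is marked `ignore`.
///
/// ```ignore
/// use count::Count;
/// use find_nth::FindNth;
///
/// let rs: RsDict = vec![true, false, true, true].into();
/// assert_eq!(rs.count(.., 1), 3); // three `1` bits in total
/// assert_eq!(rs.find_nth(.., 1, 2), Some(3)); // the third `1` is at index 3
/// ```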
#[derive(Clone, Debug)]
pub struct RsDict {
len: usize,
buf: Vec<u64>,
rank: Vec<usize>,
sel0: Vec<SelectPreprocess>,
sel1: Vec<SelectPreprocess>,
}
#[derive(Clone, Debug)]
enum SelectPreprocess {
Sparse(Vec<usize>),
Dense(Range<usize>),
}
use SelectPreprocess::{Dense, Sparse};
impl From<Vec<bool>> for RsDict {
fn from(buf: Vec<bool>) -> Self {
let len = buf.len();
let buf = Self::compress_vec_bool(buf);
let rank = Self::preprocess_rank(&buf);
let sel0 = Self::preprocess_select(&buf, len, 0);
let sel1 = Self::preprocess_select(&buf, len, 1);
Self { len, buf, rank, sel0, sel1 }
}
}
impl RsDict {
fn compress_vec_bool(buf: Vec<bool>) -> Vec<u64> {
if buf.is_empty() {
return vec![];
}
let n = buf.len();
let nc = 1 + (n - 1) / WORD_SIZE;
let mut res = vec![0; nc + 1];
for i in 0..n {
if buf[i] {
res[i / WORD_SIZE] |= 1_u64 << (i % WORD_SIZE);
}
}
res
}
fn preprocess_rank(buf: &[u64]) -> Vec<usize> {
let n = buf.len();
let mut res = vec![0; n];
for i in 1..n {
res[i] = res[i - 1] + buf[i - 1].count_ones() as usize;
}
res
}
fn preprocess_select(
buf: &[u64],
n: usize,
x: u64,
) -> Vec<SelectPreprocess> {
let mut sel = vec![];
let mut tmp = vec![];
let mut last = 0;
for i in 0..n {
if buf[i / WORD_SIZE] >> (i % WORD_SIZE) & 1 != x {
continue;
}
if tmp.len() == WORD_SIZE {
let len = i - last;
if len < WORD_SIZE_2 {
sel.push(Dense(last..i));
} else {
sel.push(Sparse(tmp));
}
tmp = vec![];
last = i;
}
tmp.push(i);
}
if !tmp.is_empty() {
sel.push(Sparse(tmp));
}
sel
}
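    /// Returns the number of positions `i < end` whose bit equals `x`.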
pub fn rank(&self, end: usize, x: u64) -> usize {
let il = end / WORD_SIZE;
let is = end % WORD_SIZE;
let rank1 = self.rank[il]
+ (self.buf[il] & !(!0_u64 << is)).count_ones() as usize;
        if x == 0 { end - rank1 } else { rank1 }
}
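    /// Returns the smallest prefix length `end` such that `0..end` contains
    /// exactly `k` occurrences of `x`, or `None` if fewer than `k` exist.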
pub fn select(&self, x: u64, k: usize) -> Option<usize> {
if self.rank(self.len, x) < k {
None
} else if k == 0 {
Some(0)
} else {
Some(self.find_nth_internal(x, k - 1) + 1)
}
}
}
impl Count<u64> for RsDict {
fn count(&self, r: impl RangeBounds<usize>, x: u64) -> usize {
let Range { start, end } = bounds_within(r, self.len);
if start > 0 {
self.rank(end, x) - self.rank(start, x)
} else {
self.rank(end, x)
}
}
}
impl FindNth<u64> for RsDict {
fn find_nth(
&self,
r: impl RangeBounds<usize>,
x: u64,
n: usize,
) -> Option<usize> {
let Range { start, end } = bounds_within(r, self.len);
if self.count(start..end, x) <= n {
None
} else {
let offset = self.rank(start, x);
Some(self.find_nth_internal(x, offset + n))
}
}
}
impl RsDict {
fn find_nth_internal(&self, x: u64, n: usize) -> usize {
if self.rank(self.len, x) < n {
panic!("the number of {}s is less than {}", x, n);
}
let sel = if x == 0 { &self.sel0 } else { &self.sel1 };
let il = n / WORD_SIZE;
let is = n % WORD_SIZE;
eprintln!("{:?}", sel[il]);
match &sel[il] {
Sparse(dir) => dir[is],
Dense(range) => {
let mut lo = range.start / WORD_SIZE;
let mut hi = 1 + (range.end - 1) / WORD_SIZE;
while hi - lo > 1 {
let mid = lo + (hi - lo) / 2;
let rank = self.rank_rough(mid, x);
*(if rank <= n { &mut lo } else { &mut hi }) = mid;
}
let rank_frac = n - self.rank_rough(lo, x);
lo * WORD_SIZE
+ Self::find_nth_small(self.buf[lo], x, rank_frac)
}
}
}
fn rank_rough(&self, n: usize, x: u64) -> usize {
let rank1 = self.rank[n];
        if x == 0 { n * WORD_SIZE - rank1 } else { rank1 }
}
    /// Position of the `n`-th (0-indexed) occurrence of bit `x` within `word`.
    fn find_nth_small(word: u64, x: u64, n: usize) -> usize {
        // Normalize so that we always search for `1` bits.
        let mut word = if x == 0 { !word } else { word };
        let mut n = n as u32;
        let mut res = 0;
        // Binary descent over the word: if the lower `mid` bits hold at most
        // `n` ones, the answer lies above them, so shift them away.
        for &mid in &[32, 16, 8, 4, 2, 1] {
            let count = (word & !(!0 << mid)).count_ones();
            if count <= n {
                n -= count;
                word >>= mid;
                res += mid;
            }
        }
        res
    }
}
#[test]
fn select_internal() {
assert_eq!(RsDict::find_nth_small(0x00000000_00000001_u64, 1, 0), 0);
assert_eq!(RsDict::find_nth_small(0x00000000_00000003_u64, 1, 1), 1);
assert_eq!(RsDict::find_nth_small(0x00000000_00000010_u64, 1, 0), 4);
assert_eq!(RsDict::find_nth_small(0xffffffff_ffffffff_u64, 1, 63), 63);
}
#[test]
fn test_rs() {
let n = 65536 + 4096;
let buf: Vec<_> = (0..n).map(|i| i % 1024 != 0).collect();
let rs: RsDict = buf.clone().into();
let mut zero = 0;
let mut one = 0;
for i in 0..n {
assert_eq!(rs.count(0..i, 0), zero);
assert_eq!(rs.count(0..i, 1), one);
if buf[i] {
one += 1;
} else {
zero += 1;
}
}
assert_eq!(rs.count(.., 0), zero);
assert_eq!(rs.count(.., 1), one);
let zeros: Vec<_> = (0..n).filter(|&i| !buf[i]).collect();
let ones: Vec<_> = (0..n).filter(|&i| buf[i]).collect();
for i in 0..zeros.len() {
let s0 = rs.find_nth(.., 0, i);
assert_eq!(s0, Some(zeros[i]));
assert_eq!(rs.count(..=s0.unwrap(), 0), i + 1);
}
for i in 0..ones.len() {
let s1 = rs.find_nth(.., 1, i);
assert_eq!(s1, Some(ones[i]));
assert_eq!(rs.count(..=s1.unwrap(), 1), i + 1);
}
assert_eq!(rs.find_nth(.., 0, zeros.len()), None);
assert_eq!(rs.find_nth(.., 1, ones.len()), None);
}
| 29.723485 | 75 | 0.493437 |
d725675ce9107f4b0e2bb95b1ecaece87c61ee8e | 7,474 | // Copyright 2020 Arnau Siches
// Licensed under the MIT license <LICENCE or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed except
// according to those terms.
//! `heidi` implements the NHS number validation “Modulus 11”. See:
//! <https://www.datadictionary.nhs.uk/data_dictionary/attributes/n/nhs/nhs_number_de.asp>
//!
//! Example numbers were generated with <http://danielbayley.uk/nhs-number/>
//!
//! The NHS Number is a unique number allocated to every patient registered with
//! the NHS in England, Wales and the Isle of Man.
//!
//! In short, an NHS Number is always 10 digits long, sometimes formatted in a 3-3-4 manner.
//! For example, `6541003238` can be presented as `654 100 3238`.
//!
//! The last digit of the number is the “check digit” to aid in integrity checks.
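//!
//! A quick round-trip sketch (formatted input is accepted, and the alternate
//! `{:#}` form shown below on `Number` restores the 3-3-4 grouping):
//!
//! ```
//! use heidi::nhs::Number;
//! use std::str::FromStr;
//!
//! let number = Number::from_str("654 100 3238").unwrap();
//! assert_eq!(number.to_string(), "6541003238");
//! assert_eq!(format!("{:#}", number), "654 100 3238");
//! ```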
use crate::error::ValidationError;
use crate::number;
use std::convert::TryFrom;
use std::fmt;
use std::str::FromStr;
/// A digit can be from 0 to 9.
pub type Digit = u16;
/// Represents an NHS Number as a list of 9 digits (`Number.digits()`) plus 1
/// check digit (`Number.checkdigit()`).
///
/// # Examples
///
/// ```
/// use heidi::nhs::Number;
/// use std::str::FromStr;
///
/// let n = "6541003238";
/// let number = Number::from_str(n);
///
/// assert_eq!(*number.unwrap().checkdigit(), 8);
/// ```
///
/// Or from a `String`:
///
/// ```
/// use std::convert::TryFrom;
/// use heidi::nhs::Number;
///
/// let n = String::from("6541003238");
/// let number = Number::try_from(n);
///
/// assert_eq!(*number.unwrap().checkdigit(), 8);
/// ```
///
/// Finally, with a `u16` slice:
///
/// ```
/// use std::convert::TryFrom;
/// use heidi::nhs::Number;
///
/// let n: [u16; 10] = [6, 5, 4, 1, 0, 0, 3, 2, 3, 8];
/// let number = Number::try_from(&n);
///
/// assert_eq!(*number.unwrap().checkdigit(), 8);
/// ```
#[derive(PartialEq, Clone, Debug)]
pub struct Number(number::Number);
impl Number {
/// Creates a new Number from the main 9 digits.
///
/// Prefer `FromStr` or `TryFrom<[Digit; 10]>` if you have a full NHS number.
///
/// # Examples
///
/// ```
/// use heidi::nhs::Number;
///
/// let n: [u16; 9] = [3, 7, 8, 3, 9, 5, 5, 6, 0];
/// let number = Number::new(n);
///
/// assert_eq!(*number.unwrap().checkdigit(), 2);
/// ```
pub fn new(digits: [Digit; 9]) -> Result<Self, ValidationError> {
Ok(Number(number::Number::new(digits)?))
}
pub fn checkdigit(&self) -> &Digit {
self.0.checkdigit()
}
pub fn digits(&self) -> &[Digit; 9] {
self.0.digits()
}
}
impl fmt::Display for Number {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
let digits = self.0.digits();
write!(formatter, "{}", &digits[0])?;
write!(formatter, "{}", &digits[1])?;
write!(formatter, "{}", &digits[2])?;
if formatter.alternate() {
write!(formatter, " ")?;
}
write!(formatter, "{}", &digits[3])?;
write!(formatter, "{}", &digits[4])?;
write!(formatter, "{}", &digits[5])?;
if formatter.alternate() {
write!(formatter, " ")?;
}
write!(formatter, "{}", &digits[6])?;
write!(formatter, "{}", &digits[7])?;
write!(formatter, "{}", &digits[8])?;
write!(formatter, "{}", self.0.checkdigit())?;
Ok(())
}
}
impl TryFrom<&[Digit; 10]> for Number {
type Error = ValidationError;
/// Converts an array slice of 10 decimal `u16` into a [`Number`].
///
/// # Examples
///
/// ```
/// use heidi::nhs::Number;
/// use std::convert::TryFrom;
///
/// let n: [u16; 10] = [6, 5, 4, 1, 0, 0, 3, 2, 3, 8];
/// let number = Number::try_from(&n);
///
/// assert_eq!(*number.unwrap().checkdigit(), 8);
/// ```
///
/// # Errors
///
/// Fails with [ValidationError] when the check digit cannot be verified.
fn try_from(value: &[Digit; 10]) -> Result<Self, Self::Error> {
let number = number::Number::try_from(value)?;
Ok(Number(number))
}
}
impl TryFrom<String> for Number {
type Error = ValidationError;
/// Converts a string of 10 digits into a [`Number`].
///
/// ```
/// use heidi::nhs::Number;
/// use std::convert::TryFrom;
///
/// let n = String::from("6541003238");
/// let number = Number::try_from(n);
///
/// assert_eq!(*number.unwrap().checkdigit(), 8);
/// ```
///
/// # Errors
///
/// Fails with [ValidationError] when the check digit cannot be verified.
fn try_from(value: String) -> Result<Self, Self::Error> {
Number::from_str(&value)
}
}
impl TryFrom<usize> for Number {
type Error = ValidationError;
/// Converts an unsigned integer into a [`Number`].
///
/// # Examples
///
/// ```
/// use heidi::nhs::Number;
/// use std::convert::TryFrom;
///
/// let n: usize = 6541003238;
/// let number = Number::try_from(n);
///
/// assert_eq!(*number.unwrap().checkdigit(), 8);
/// ```
///
/// # Errors
///
/// Fails with [ValidationError] when the check digit cannot be verified.
fn try_from(value: usize) -> Result<Self, Self::Error> {
let number = number::Number::try_from(value)?;
Ok(Number(number))
}
}
impl FromStr for Number {
type Err = ValidationError;
/// Converts a string slice of 10 digits into a [`Number`].
///
/// ```
/// use heidi::nhs::Number;
/// use std::str::FromStr;
///
/// let n = "6541003238";
/// let number = Number::from_str(n);
///
/// assert_eq!(*number.unwrap().checkdigit(), 8);
/// ```
///
/// # Errors
///
/// Fails with [ValidationError] when the check digit cannot be verified.
fn from_str(s: &str) -> Result<Self, Self::Err> {
let number = number::Number::from_str(s)?;
Ok(Number(number))
}
}
/// Returns a random NHS Number.
///
/// If the result is not valid (e.g. the modulus 11 is 10) it will generate a new one.
///
/// # Examples
///
/// ```
/// use heidi::nhs::lottery;
///
/// let number = lottery();
/// assert!(number.is_ok());
/// ```
pub fn lottery() -> Result<Number, ValidationError> {
use rand::prelude::*;
let mut rng = rand::thread_rng();
let mut digits = [0u16; 9];
let distr = rand::distributions::Uniform::new_inclusive(0, 9);
for x in &mut digits {
*x = rng.sample(distr);
}
match Number::new(digits) {
Err(_) => lottery(),
number => number,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn valid_formatted_string() -> Result<(), ValidationError> {
let f = Number::from_str("893 177 4583")?;
let u = Number::from_str("8931774583")?;
assert_eq!(f.checkdigit(), u.checkdigit());
assert_eq!(f.digits(), u.digits());
Ok(())
}
#[test]
fn display_compact() -> Result<(), ValidationError> {
let n = "893 177 4583";
let number = Number::from_str(n)?;
let expected = "8931774583";
assert_eq!(format!("{}", number), expected.to_string());
Ok(())
}
#[test]
fn display_alternate() -> Result<(), ValidationError> {
let n = String::from("893 177 4583");
let number = Number::from_str(&n)?;
assert_eq!(format!("{:#}", number), n);
Ok(())
}
}
| 25.772414 | 91 | 0.55004 |
9b863499e8219b5a2ba766e1a9aa5a4f591ea90d | 21,801 | use std::prelude::v1::*;
use crate::de::ParserNumber;
use crate::error::Error;
use crate::lib::*;
use serde::de::{self, Unexpected, Visitor};
use serde::{
forward_to_deserialize_any, serde_if_integer128, Deserialize, Deserializer, Serialize,
Serializer,
};
#[cfg(feature = "arbitrary_precision")]
use crate::error::ErrorCode;
#[cfg(feature = "arbitrary_precision")]
use serde::de::{IntoDeserializer, MapAccess};
#[cfg(feature = "arbitrary_precision")]
pub(crate) const TOKEN: &str = "$serde_json::private::Number";
/// Represents a JSON number, whether integer or floating point.
#[derive(Clone, Eq, PartialEq)]
pub struct Number {
n: N,
}
#[cfg(not(feature = "arbitrary_precision"))]
#[derive(Copy, Clone, PartialEq)]
enum N {
PosInt(u64),
/// Always less than zero.
NegInt(i64),
/// Always finite.
Float(f64),
}
// Implementing Eq is fine since any float values are always finite.
#[cfg(not(feature = "arbitrary_precision"))]
impl Eq for N {}
#[cfg(feature = "arbitrary_precision")]
type N = String;
impl Number {
/// Returns true if the `Number` is an integer between `i64::MIN` and
/// `i64::MAX`.
///
/// For any Number on which `is_i64` returns true, `as_i64` is guaranteed to
/// return the integer value.
///
/// ```
/// # use serde_json::json;
/// #
/// let big = i64::max_value() as u64 + 10;
/// let v = json!({ "a": 64, "b": big, "c": 256.0 });
///
/// assert!(v["a"].is_i64());
///
/// // Greater than i64::MAX.
/// assert!(!v["b"].is_i64());
///
/// // Numbers with a decimal point are not considered integers.
/// assert!(!v["c"].is_i64());
/// ```
#[inline]
pub fn is_i64(&self) -> bool {
#[cfg(not(feature = "arbitrary_precision"))]
match self.n {
N::PosInt(v) => v <= i64::max_value() as u64,
N::NegInt(_) => true,
N::Float(_) => false,
}
#[cfg(feature = "arbitrary_precision")]
self.as_i64().is_some()
}
/// Returns true if the `Number` is an integer between zero and `u64::MAX`.
///
/// For any Number on which `is_u64` returns true, `as_u64` is guaranteed to
/// return the integer value.
///
/// ```
/// # use serde_json::json;
/// #
/// let v = json!({ "a": 64, "b": -64, "c": 256.0 });
///
/// assert!(v["a"].is_u64());
///
/// // Negative integer.
/// assert!(!v["b"].is_u64());
///
/// // Numbers with a decimal point are not considered integers.
/// assert!(!v["c"].is_u64());
/// ```
#[inline]
pub fn is_u64(&self) -> bool {
#[cfg(not(feature = "arbitrary_precision"))]
match self.n {
N::PosInt(_) => true,
N::NegInt(_) | N::Float(_) => false,
}
#[cfg(feature = "arbitrary_precision")]
self.as_u64().is_some()
}
/// Returns true if the `Number` can be represented by f64.
///
/// For any Number on which `is_f64` returns true, `as_f64` is guaranteed to
/// return the floating point value.
///
/// Currently this function returns true if and only if both `is_i64` and
/// `is_u64` return false but this is not a guarantee in the future.
///
/// ```
/// # use serde_json::json;
/// #
/// let v = json!({ "a": 256.0, "b": 64, "c": -64 });
///
/// assert!(v["a"].is_f64());
///
/// // Integers.
/// assert!(!v["b"].is_f64());
/// assert!(!v["c"].is_f64());
/// ```
#[inline]
pub fn is_f64(&self) -> bool {
#[cfg(not(feature = "arbitrary_precision"))]
match self.n {
N::Float(_) => true,
N::PosInt(_) | N::NegInt(_) => false,
}
#[cfg(feature = "arbitrary_precision")]
{
for c in self.n.chars() {
if c == '.' || c == 'e' || c == 'E' {
return self.n.parse::<f64>().ok().map_or(false, |f| f.is_finite());
}
}
false
}
}
/// If the `Number` is an integer, represent it as i64 if possible. Returns
/// None otherwise.
///
/// ```
/// # use serde_json::json;
/// #
/// let big = i64::max_value() as u64 + 10;
/// let v = json!({ "a": 64, "b": big, "c": 256.0 });
///
/// assert_eq!(v["a"].as_i64(), Some(64));
/// assert_eq!(v["b"].as_i64(), None);
/// assert_eq!(v["c"].as_i64(), None);
/// ```
#[inline]
pub fn as_i64(&self) -> Option<i64> {
#[cfg(not(feature = "arbitrary_precision"))]
match self.n {
N::PosInt(n) => {
if n <= i64::max_value() as u64 {
Some(n as i64)
} else {
None
}
}
N::NegInt(n) => Some(n),
N::Float(_) => None,
}
#[cfg(feature = "arbitrary_precision")]
self.n.parse().ok()
}
/// If the `Number` is an integer, represent it as u64 if possible. Returns
/// None otherwise.
///
/// ```
/// # use serde_json::json;
/// #
/// let v = json!({ "a": 64, "b": -64, "c": 256.0 });
///
/// assert_eq!(v["a"].as_u64(), Some(64));
/// assert_eq!(v["b"].as_u64(), None);
/// assert_eq!(v["c"].as_u64(), None);
/// ```
#[inline]
pub fn as_u64(&self) -> Option<u64> {
#[cfg(not(feature = "arbitrary_precision"))]
match self.n {
N::PosInt(n) => Some(n),
N::NegInt(_) | N::Float(_) => None,
}
#[cfg(feature = "arbitrary_precision")]
self.n.parse().ok()
}
/// Represents the number as f64 if possible. Returns None otherwise.
///
/// ```
/// # use serde_json::json;
/// #
/// let v = json!({ "a": 256.0, "b": 64, "c": -64 });
///
/// assert_eq!(v["a"].as_f64(), Some(256.0));
/// assert_eq!(v["b"].as_f64(), Some(64.0));
/// assert_eq!(v["c"].as_f64(), Some(-64.0));
/// ```
#[inline]
pub fn as_f64(&self) -> Option<f64> {
#[cfg(not(feature = "arbitrary_precision"))]
match self.n {
N::PosInt(n) => Some(n as f64),
N::NegInt(n) => Some(n as f64),
N::Float(n) => Some(n),
}
#[cfg(feature = "arbitrary_precision")]
self.n.parse::<f64>().ok().filter(|float| float.is_finite())
}
/// Converts a finite `f64` to a `Number`. Infinite or NaN values are not JSON
/// numbers.
///
/// ```
/// # use std::f64;
/// #
/// # use serde_json::Number;
/// #
/// assert!(Number::from_f64(256.0).is_some());
///
/// assert!(Number::from_f64(f64::NAN).is_none());
/// ```
#[inline]
pub fn from_f64(f: f64) -> Option<Number> {
if f.is_finite() {
let n = {
#[cfg(not(feature = "arbitrary_precision"))]
{
N::Float(f)
}
#[cfg(feature = "arbitrary_precision")]
{
ryu::Buffer::new().format_finite(f).to_owned()
}
};
Some(Number { n })
} else {
None
}
}
#[cfg(feature = "arbitrary_precision")]
/// Not public API. Only tests use this.
#[doc(hidden)]
#[inline]
pub fn from_string_unchecked(n: String) -> Self {
Number { n }
}
}
impl fmt::Display for Number {
#[cfg(not(feature = "arbitrary_precision"))]
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self.n {
N::PosInt(u) => Display::fmt(&u, formatter),
N::NegInt(i) => Display::fmt(&i, formatter),
N::Float(f) => Display::fmt(&f, formatter),
}
}
#[cfg(feature = "arbitrary_precision")]
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.n, formatter)
}
}
impl Debug for Number {
#[cfg(not(feature = "arbitrary_precision"))]
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
let mut debug = formatter.debug_tuple("Number");
match self.n {
N::PosInt(i) => {
debug.field(&i);
}
N::NegInt(i) => {
debug.field(&i);
}
N::Float(f) => {
debug.field(&f);
}
}
debug.finish()
}
#[cfg(feature = "arbitrary_precision")]
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter
.debug_tuple("Number")
.field(&format_args!("{}", self.n))
.finish()
}
}
impl Serialize for Number {
#[cfg(not(feature = "arbitrary_precision"))]
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self.n {
N::PosInt(u) => serializer.serialize_u64(u),
N::NegInt(i) => serializer.serialize_i64(i),
N::Float(f) => serializer.serialize_f64(f),
}
}
#[cfg(feature = "arbitrary_precision")]
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
use serde::ser::SerializeStruct;
let mut s = serializer.serialize_struct(TOKEN, 1)?;
s.serialize_field(TOKEN, &self.n)?;
s.end()
}
}
impl<'de> Deserialize<'de> for Number {
#[inline]
fn deserialize<D>(deserializer: D) -> Result<Number, D::Error>
where
D: Deserializer<'de>,
{
struct NumberVisitor;
impl<'de> Visitor<'de> for NumberVisitor {
type Value = Number;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a JSON number")
}
#[inline]
fn visit_i64<E>(self, value: i64) -> Result<Number, E> {
Ok(value.into())
}
#[inline]
fn visit_u64<E>(self, value: u64) -> Result<Number, E> {
Ok(value.into())
}
#[inline]
fn visit_f64<E>(self, value: f64) -> Result<Number, E>
where
E: de::Error,
{
Number::from_f64(value).ok_or_else(|| de::Error::custom("not a JSON number"))
}
#[cfg(feature = "arbitrary_precision")]
#[inline]
fn visit_map<V>(self, mut visitor: V) -> Result<Number, V::Error>
where
V: de::MapAccess<'de>,
{
let value = visitor.next_key::<NumberKey>()?;
if value.is_none() {
return Err(de::Error::invalid_type(Unexpected::Map, &self));
}
let v: NumberFromString = visitor.next_value()?;
Ok(v.value)
}
}
deserializer.deserialize_any(NumberVisitor)
}
}
#[cfg(feature = "arbitrary_precision")]
struct NumberKey;
#[cfg(feature = "arbitrary_precision")]
impl<'de> de::Deserialize<'de> for NumberKey {
fn deserialize<D>(deserializer: D) -> Result<NumberKey, D::Error>
where
D: de::Deserializer<'de>,
{
struct FieldVisitor;
impl<'de> de::Visitor<'de> for FieldVisitor {
type Value = ();
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a valid number field")
}
fn visit_str<E>(self, s: &str) -> Result<(), E>
where
E: de::Error,
{
if s == TOKEN {
Ok(())
} else {
Err(de::Error::custom("expected field with custom name"))
}
}
}
deserializer.deserialize_identifier(FieldVisitor)?;
Ok(NumberKey)
}
}
#[cfg(feature = "arbitrary_precision")]
pub struct NumberFromString {
pub value: Number,
}
#[cfg(feature = "arbitrary_precision")]
impl<'de> de::Deserialize<'de> for NumberFromString {
fn deserialize<D>(deserializer: D) -> Result<NumberFromString, D::Error>
where
D: de::Deserializer<'de>,
{
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = NumberFromString;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("string containing a number")
}
fn visit_str<E>(self, s: &str) -> Result<NumberFromString, E>
where
E: de::Error,
{
let n = tri!(s.parse().map_err(de::Error::custom));
Ok(NumberFromString { value: n })
}
}
deserializer.deserialize_str(Visitor)
}
}
#[cfg(feature = "arbitrary_precision")]
fn invalid_number() -> Error {
Error::syntax(ErrorCode::InvalidNumber, 0, 0)
}
macro_rules! deserialize_any {
(@expand [$($num_string:tt)*]) => {
#[cfg(not(feature = "arbitrary_precision"))]
#[inline]
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
where
V: Visitor<'de>,
{
match self.n {
N::PosInt(u) => visitor.visit_u64(u),
N::NegInt(i) => visitor.visit_i64(i),
N::Float(f) => visitor.visit_f64(f),
}
}
#[cfg(feature = "arbitrary_precision")]
#[inline]
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
where V: Visitor<'de>
{
if let Some(u) = self.as_u64() {
return visitor.visit_u64(u);
} else if let Some(i) = self.as_i64() {
return visitor.visit_i64(i);
} else if let Some(f) = self.as_f64() {
if ryu::Buffer::new().format_finite(f) == self.n || f.to_string() == self.n {
return visitor.visit_f64(f);
}
}
visitor.visit_map(NumberDeserializer {
number: Some(self.$($num_string)*),
})
}
};
(owned) => {
deserialize_any!(@expand [n]);
};
(ref) => {
deserialize_any!(@expand [n.clone()]);
};
}
macro_rules! deserialize_number {
($deserialize:ident => $visit:ident) => {
#[cfg(not(feature = "arbitrary_precision"))]
fn $deserialize<V>(self, visitor: V) -> Result<V::Value, Error>
where
V: Visitor<'de>,
{
self.deserialize_any(visitor)
}
#[cfg(feature = "arbitrary_precision")]
fn $deserialize<V>(self, visitor: V) -> Result<V::Value, Error>
where
V: de::Visitor<'de>,
{
visitor.$visit(self.n.parse().map_err(|_| invalid_number())?)
}
};
}
impl<'de> Deserializer<'de> for Number {
type Error = Error;
deserialize_any!(owned);
deserialize_number!(deserialize_i8 => visit_i8);
deserialize_number!(deserialize_i16 => visit_i16);
deserialize_number!(deserialize_i32 => visit_i32);
deserialize_number!(deserialize_i64 => visit_i64);
deserialize_number!(deserialize_u8 => visit_u8);
deserialize_number!(deserialize_u16 => visit_u16);
deserialize_number!(deserialize_u32 => visit_u32);
deserialize_number!(deserialize_u64 => visit_u64);
deserialize_number!(deserialize_f32 => visit_f32);
deserialize_number!(deserialize_f64 => visit_f64);
serde_if_integer128! {
deserialize_number!(deserialize_i128 => visit_i128);
deserialize_number!(deserialize_u128 => visit_u128);
}
forward_to_deserialize_any! {
bool char str string bytes byte_buf option unit unit_struct
newtype_struct seq tuple tuple_struct map struct enum identifier
ignored_any
}
}
impl<'de, 'a> Deserializer<'de> for &'a Number {
type Error = Error;
deserialize_any!(ref);
deserialize_number!(deserialize_i8 => visit_i8);
deserialize_number!(deserialize_i16 => visit_i16);
deserialize_number!(deserialize_i32 => visit_i32);
deserialize_number!(deserialize_i64 => visit_i64);
deserialize_number!(deserialize_u8 => visit_u8);
deserialize_number!(deserialize_u16 => visit_u16);
deserialize_number!(deserialize_u32 => visit_u32);
deserialize_number!(deserialize_u64 => visit_u64);
deserialize_number!(deserialize_f32 => visit_f32);
deserialize_number!(deserialize_f64 => visit_f64);
serde_if_integer128! {
deserialize_number!(deserialize_i128 => visit_i128);
deserialize_number!(deserialize_u128 => visit_u128);
}
forward_to_deserialize_any! {
bool char str string bytes byte_buf option unit unit_struct
newtype_struct seq tuple tuple_struct map struct enum identifier
ignored_any
}
}
#[cfg(feature = "arbitrary_precision")]
pub(crate) struct NumberDeserializer {
pub number: Option<String>,
}
#[cfg(feature = "arbitrary_precision")]
impl<'de> MapAccess<'de> for NumberDeserializer {
type Error = Error;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where
K: de::DeserializeSeed<'de>,
{
if self.number.is_none() {
return Ok(None);
}
seed.deserialize(NumberFieldDeserializer).map(Some)
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where
V: de::DeserializeSeed<'de>,
{
seed.deserialize(self.number.take().unwrap().into_deserializer())
}
}
#[cfg(feature = "arbitrary_precision")]
struct NumberFieldDeserializer;
#[cfg(feature = "arbitrary_precision")]
impl<'de> Deserializer<'de> for NumberFieldDeserializer {
type Error = Error;
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Error>
where
V: de::Visitor<'de>,
{
visitor.visit_borrowed_str(TOKEN)
}
forward_to_deserialize_any! {
bool u8 u16 u32 u64 u128 i8 i16 i32 i64 i128 f32 f64 char str string seq
bytes byte_buf map struct option unit newtype_struct ignored_any
unit_struct tuple_struct tuple enum identifier
}
}
impl From<ParserNumber> for Number {
fn from(value: ParserNumber) -> Self {
let n = match value {
ParserNumber::F64(f) => {
#[cfg(not(feature = "arbitrary_precision"))]
{
N::Float(f)
}
#[cfg(feature = "arbitrary_precision")]
{
f.to_string()
}
}
ParserNumber::U64(u) => {
#[cfg(not(feature = "arbitrary_precision"))]
{
N::PosInt(u)
}
#[cfg(feature = "arbitrary_precision")]
{
u.to_string()
}
}
ParserNumber::I64(i) => {
#[cfg(not(feature = "arbitrary_precision"))]
{
N::NegInt(i)
}
#[cfg(feature = "arbitrary_precision")]
{
i.to_string()
}
}
#[cfg(feature = "arbitrary_precision")]
ParserNumber::String(s) => s,
};
Number { n }
}
}
macro_rules! impl_from_unsigned {
(
$($ty:ty),*
) => {
$(
impl From<$ty> for Number {
#[inline]
fn from(u: $ty) -> Self {
let n = {
#[cfg(not(feature = "arbitrary_precision"))]
{ N::PosInt(u as u64) }
#[cfg(feature = "arbitrary_precision")]
{
itoa::Buffer::new().format(u).to_owned()
}
};
Number { n }
}
}
)*
};
}
macro_rules! impl_from_signed {
(
$($ty:ty),*
) => {
$(
impl From<$ty> for Number {
#[inline]
fn from(i: $ty) -> Self {
let n = {
#[cfg(not(feature = "arbitrary_precision"))]
{
if i < 0 {
N::NegInt(i as i64)
} else {
N::PosInt(i as u64)
}
}
#[cfg(feature = "arbitrary_precision")]
{
itoa::Buffer::new().format(i).to_owned()
}
};
Number { n }
}
}
)*
};
}
impl_from_unsigned!(u8, u16, u32, u64, usize);
impl_from_signed!(i8, i16, i32, i64, isize);
#[cfg(feature = "arbitrary_precision")]
serde_if_integer128! {
impl From<i128> for Number {
fn from(i: i128) -> Self {
Number { n: i.to_string() }
}
}
impl From<u128> for Number {
fn from(u: u128) -> Self {
Number { n: u.to_string() }
}
}
}
impl Number {
#[cfg(not(feature = "arbitrary_precision"))]
#[cold]
pub(crate) fn unexpected(&self) -> Unexpected {
match self.n {
N::PosInt(u) => Unexpected::Unsigned(u),
N::NegInt(i) => Unexpected::Signed(i),
N::Float(f) => Unexpected::Float(f),
}
}
#[cfg(feature = "arbitrary_precision")]
#[cold]
pub(crate) fn unexpected(&self) -> Unexpected {
Unexpected::Other("number")
}
}
| 29.068 | 93 | 0.50328 |
f7dfb33f89b14d7dfb5741f1d7b5335a350f34a5 | 731 | // This file is part of linux-epoll. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/linux-epoll/master/COPYRIGHT. No part of linux-epoll, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2019 The developers of linux-epoll. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/linux-epoll/master/COPYRIGHT.
include!("Dhcid.rs");
include!("DhcidDigest.rs");
include!("DhcidResourceRecordIgnoredBecauseReason.rs");
include!("IdentifierType.rs");
| 81.222222 | 388 | 0.79617 |
d52a778603b2cbb15d2a6162fd0e5ef5ded6da61 | 768 | // result1.rs
// Make this test pass! Execute `rustlings hint result1` for hints :)
#[derive(PartialEq, Debug)]
struct PositiveNonzeroInteger(u64);
#[derive(PartialEq, Debug)]
enum CreationError {
Negative,
Zero,
}
impl PositiveNonzeroInteger {
fn new(value: i64) -> Result<PositiveNonzeroInteger, CreationError> {
        if value > 0 {
            Ok(PositiveNonzeroInteger(value as u64))
        } else if value == 0 {
            Err(CreationError::Zero)
        } else {
            Err(CreationError::Negative)
        }
}
}
#[test]
fn test_creation() {
assert!(PositiveNonzeroInteger::new(10).is_ok());
assert_eq!(
Err(CreationError::Negative),
PositiveNonzeroInteger::new(-10)
);
assert_eq!(Err(CreationError::Zero), PositiveNonzeroInteger::new(0));
}
| 23.272727 | 73 | 0.669271 |
67b9a96e709108a476497783898e89d4d9bd5d4e | 4,926 | /* Copyright 2018 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use super::{BinaryReader, BinaryReaderError, Operator, Result};
#[derive(Clone)]
pub struct OperatorsReader<'a> {
pub(crate) reader: BinaryReader<'a>,
}
impl<'a> OperatorsReader<'a> {
pub(crate) fn new<'b>(data: &'a [u8], offset: usize) -> OperatorsReader<'b>
where
'a: 'b,
{
OperatorsReader {
reader: BinaryReader::new_with_offset(data, offset),
}
}
pub fn eof(&self) -> bool {
self.reader.eof()
}
pub fn original_position(&self) -> usize {
self.reader.original_position()
}
pub fn allow_memarg64(&mut self, allow: bool) {
self.reader.allow_memarg64(allow);
}
pub fn ensure_end(&self) -> Result<()> {
if self.eof() {
return Ok(());
}
Err(BinaryReaderError::new(
"Unexpected data at the end of operators",
self.reader.original_position(),
))
}
pub fn read<'b>(&mut self) -> Result<Operator<'b>>
where
'a: 'b,
{
self.reader.read_operator()
}
pub fn into_iter_with_offsets<'b>(self) -> OperatorsIteratorWithOffsets<'b>
where
'a: 'b,
{
OperatorsIteratorWithOffsets {
reader: self,
err: false,
}
}
pub fn read_with_offset<'b>(&mut self) -> Result<(Operator<'b>, usize)>
where
'a: 'b,
{
let pos = self.reader.original_position();
Ok((self.read()?, pos))
}
pub fn get_binary_reader(&self) -> BinaryReader<'a> {
self.reader.clone()
}
}
impl<'a> IntoIterator for OperatorsReader<'a> {
type Item = Result<Operator<'a>>;
type IntoIter = OperatorsIterator<'a>;
/// Reads content of the code section.
///
/// # Examples
/// ```
/// use wasmparser::{Operator, CodeSectionReader, Result};
/// # let data: &[u8] = &[
/// # 0x01, 0x03, 0x00, 0x01, 0x0b];
/// let mut code_reader = CodeSectionReader::new(data, 0).unwrap();
/// for _ in 0..code_reader.get_count() {
/// let body = code_reader.read().expect("function body");
/// let mut op_reader = body.get_operators_reader().expect("op reader");
/// let ops = op_reader.into_iter().collect::<Result<Vec<Operator>>>().expect("ops");
/// assert!(
/// if let [Operator::Nop, Operator::End] = ops.as_slice() { true } else { false },
/// "found {:?}",
/// ops
/// );
/// }
/// ```
fn into_iter(self) -> Self::IntoIter {
OperatorsIterator {
reader: self,
err: false,
}
}
}
pub struct OperatorsIterator<'a> {
reader: OperatorsReader<'a>,
err: bool,
}
impl<'a> Iterator for OperatorsIterator<'a> {
type Item = Result<Operator<'a>>;
fn next(&mut self) -> Option<Self::Item> {
if self.err || self.reader.eof() {
return None;
}
let result = self.reader.read();
self.err = result.is_err();
Some(result)
}
}
pub struct OperatorsIteratorWithOffsets<'a> {
reader: OperatorsReader<'a>,
err: bool,
}
impl<'a> Iterator for OperatorsIteratorWithOffsets<'a> {
type Item = Result<(Operator<'a>, usize)>;
/// Reads content of the code section with offsets.
///
/// # Examples
/// ```
/// use wasmparser::{Operator, CodeSectionReader, Result};
/// # let data: &[u8] = &[
/// # 0x01, 0x03, 0x00, /* offset = 23 */ 0x01, 0x0b];
/// let mut code_reader = CodeSectionReader::new(data, 20).unwrap();
/// for _ in 0..code_reader.get_count() {
/// let body = code_reader.read().expect("function body");
/// let mut op_reader = body.get_operators_reader().expect("op reader");
/// let ops = op_reader.into_iter_with_offsets().collect::<Result<Vec<(Operator, usize)>>>().expect("ops");
/// assert!(
/// if let [(Operator::Nop, 23), (Operator::End, 24)] = ops.as_slice() { true } else { false },
/// "found {:?}",
/// ops
/// );
/// }
/// ```
fn next(&mut self) -> Option<Self::Item> {
if self.err || self.reader.eof() {
return None;
}
let result = self.reader.read_with_offset();
self.err = result.is_err();
Some(result)
}
}
| 28.976471 | 115 | 0.564352 |
e8f9206e71da7056886f909eff660dd79b6c4e81 | 2,377 | use super::*;
use proptest::strategy::BoxedStrategy;
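/// Property-test helper: asserts that `native`, when given a float and an
/// option list containing a term that is not a valid option, errors with
/// `badarg`.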
pub fn without_valid_option_errors_badarg(
source_file: &'static str,
native: fn(&Process, Term, Term) -> exception::Result<Term>,
) {
run(
source_file,
|arc_process| {
(
Just(arc_process.clone()),
super::strategy::term::float(arc_process.clone()),
(
Just(arc_process.clone()),
is_not_option(arc_process.clone()),
)
.prop_map(|(arc_process, option)| {
arc_process.list_from_slice(&[option]).unwrap()
}),
)
},
|(arc_process, float, options)| {
prop_assert_badarg!(
native(&arc_process, float, options),
"supported options are compact, {:decimal, 0..253}, or {:scientific, 0..249}"
);
Ok(())
},
);
}
fn is_not_option(arc_process: Arc<Process>) -> BoxedStrategy<Term> {
strategy::term(arc_process)
.prop_filter("Cannot be an option", |term| !is_option(term))
.boxed()
}
fn is_option(term: &Term) -> bool {
match term.decode().unwrap() {
TypedTerm::Atom(atom) => atom.name() == "compact",
TypedTerm::Tuple(tuple) => {
(tuple.len() == 2) && {
match tuple[0].decode().unwrap() {
TypedTerm::Atom(tag_atom) => match tag_atom.name() {
"decimals" => match tuple[1].decode().unwrap() {
TypedTerm::SmallInteger(small_integer) => {
let i: isize = small_integer.into();
0 <= i && i <= 253
}
_ => false,
},
"scientific" => match tuple[1].decode().unwrap() {
TypedTerm::SmallInteger(small_integer) => {
let i: isize = small_integer.into();
0 <= i && i <= 249
}
_ => false,
},
_ => false,
},
_ => false,
}
}
}
_ => false,
}
}
| 32.561644 | 93 | 0.411022 |
28bcea19e8b0824939360b9ea609da6cfae5a3ac | 10,180 | use crate::core::ribosome::CallContext;
use crate::core::ribosome::HostFnAccess;
use crate::core::ribosome::RibosomeT;
use crate::core::ribosome::ZomeCall;
use futures::future::join_all;
use holochain_types::prelude::*;
use holochain_wasmer_host::prelude::WasmError;
use std::sync::Arc;
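/// Dispatches each [`Call`] as a zome call through the conductor handle,
/// awaiting them all concurrently, and collects the responses. Only permitted
/// when the calling context has workspace write access.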
pub fn call(
_ribosome: Arc<impl RibosomeT>,
call_context: Arc<CallContext>,
inputs: Vec<Call>,
) -> Result<Vec<ZomeCallResponse>, WasmError> {
match HostFnAccess::from(&call_context.host_context()) {
HostFnAccess {
write_workspace: Permission::Allow,
..
} => {
let results: Vec<Result<Result<ZomeCallResponse, _>, _>> =
tokio_helper::block_forever_on(async move {
join_all(inputs.into_iter().map(|input| async {
let Call {
to_cell,
zome_name,
fn_name,
cap_secret,
payload,
provenance,
} = input;
let cell_id = to_cell.unwrap_or_else(|| {
call_context
.host_context()
.call_zome_handle()
.cell_id()
.clone()
});
let invocation = ZomeCall {
cell_id,
zome_name,
fn_name,
payload,
cap_secret,
provenance,
};
call_context
.host_context()
.call_zome_handle()
.call_zome(
invocation,
call_context
.host_context()
.workspace_write()
.clone()
.try_into()
.expect("Must have source chain to make zome call"),
)
.await
}))
.await
});
let results: Result<Vec<_>, _> = results
.into_iter()
.map(|result| match result {
Ok(v) => match v {
Ok(v) => Ok(v),
Err(ribosome_error) => Err(WasmError::Host(ribosome_error.to_string())),
},
Err(conductor_api_error) => {
Err(WasmError::Host(conductor_api_error.to_string()))
}
})
.collect();
Ok(results?)
}
_ => unreachable!(),
}
}
#[cfg(test)]
pub mod wasm_test {
use std::convert::TryFrom;
use hdk::prelude::AgentInfo;
use hdk::prelude::CellId;
use holo_hash::HeaderHash;
use holochain_serialized_bytes::SerializedBytes;
use holochain_state::prelude::fresh_reader_test;
use holochain_types::prelude::*;
use holochain_wasm_test_utils::TestWasm;
use holochain_zome_types::test_utils::fake_agent_pubkey_2;
use holochain_zome_types::ExternIO;
use holochain_zome_types::ZomeCallResponse;
use matches::assert_matches;
use rusqlite::named_params;
use crate::conductor::{api::ZomeCall, ConductorHandle};
use crate::test_utils::conductor_setup::ConductorTestData;
use crate::test_utils::install_app;
use crate::test_utils::new_zome_call;
#[tokio::test(flavor = "multi_thread")]
async fn call_test() {
observability::test_run().ok();
let zomes = vec![TestWasm::WhoAmI];
let mut conductor_test = ConductorTestData::two_agents(zomes, true).await;
let handle = conductor_test.handle();
let bob_cell_id = conductor_test.bob_call_data().unwrap().cell_id.clone();
let alice_call_data = conductor_test.alice_call_data();
let alice_cell_id = &alice_call_data.cell_id;
let alice_agent_id = alice_cell_id.agent_pubkey();
let bob_agent_id = bob_cell_id.agent_pubkey();
        // BOB INIT (to set up the cap grant)
let _ = handle
.call_zome(ZomeCall {
cell_id: bob_cell_id.clone(),
zome_name: TestWasm::WhoAmI.into(),
cap_secret: None,
fn_name: "set_access".into(),
payload: ExternIO::encode(()).unwrap(),
provenance: bob_agent_id.clone(),
})
.await
.unwrap();
// ALICE DOING A CALL
let output = handle
.call_zome(ZomeCall {
cell_id: alice_cell_id.clone(),
zome_name: TestWasm::WhoAmI.into(),
cap_secret: None,
fn_name: "who_are_they_local".into(),
payload: ExternIO::encode(&bob_cell_id).unwrap(),
provenance: alice_agent_id.clone(),
})
.await
.unwrap()
.unwrap();
match output {
ZomeCallResponse::Ok(guest_output) => {
let agent_info: AgentInfo = guest_output.decode().unwrap();
assert_eq!(
&agent_info.agent_initial_pubkey,
bob_agent_id
);
assert_eq!(
&agent_info.agent_latest_pubkey,
bob_agent_id
);
}
_ => unreachable!(),
}
conductor_test.shutdown_conductor().await;
}
/// When calling the same cell we need to make sure
/// the "as at" doesn't cause the original zome call to fail
/// when they are both writing (moving the source chain forward)
#[tokio::test(flavor = "multi_thread")]
async fn call_the_same_cell() {
observability::test_run().ok();
let zomes = vec![TestWasm::WhoAmI, TestWasm::Create];
let mut conductor_test = ConductorTestData::two_agents(zomes, false).await;
let handle = conductor_test.handle();
let alice_call_data = conductor_test.alice_call_data();
let alice_cell_id = &alice_call_data.cell_id;
let invocation =
new_zome_call(&alice_cell_id, "call_create_entry", (), TestWasm::Create).unwrap();
let result = handle.call_zome(invocation).await;
assert_matches!(result, Ok(Ok(ZomeCallResponse::Ok(_))));
// Get the header hash of that entry
let header_hash: HeaderHash =
unwrap_to::unwrap_to!(result.unwrap().unwrap() => ZomeCallResponse::Ok)
.decode()
.unwrap();
// Check alice's source chain contains the new value
let has_hash: bool = fresh_reader_test(alice_call_data.authored_env.clone(), |txn| {
txn.query_row(
"SELECT EXISTS(SELECT 1 FROM DhtOp WHERE header_hash = :hash)",
named_params! {
":hash": header_hash
},
|row| row.get(0),
)
.unwrap()
});
assert!(has_hash);
conductor_test.shutdown_conductor().await;
}
/// test calling a different zome
/// in a different cell.
#[tokio::test(flavor = "multi_thread")]
async fn bridge_call() {
observability::test_run().ok();
let zomes = vec![TestWasm::Create];
let mut conductor_test = ConductorTestData::two_agents(zomes, false).await;
let handle = conductor_test.handle();
let alice_call_data = conductor_test.alice_call_data();
let alice_cell_id = &alice_call_data.cell_id;
// Install a different dna for bob
let zomes = vec![TestWasm::WhoAmI];
let bob_cell_id = install_new_app("bobs_dna", zomes, &handle).await;
// Call create_entry in the create_entry zome from the whoami zome
let invocation = new_zome_call(
&bob_cell_id,
"call_create_entry",
alice_cell_id.clone(),
TestWasm::WhoAmI,
)
.unwrap();
let result = handle.call_zome(invocation).await;
assert_matches!(result, Ok(Ok(ZomeCallResponse::Ok(_))));
// Get the header hash of that entry
let header_hash: HeaderHash =
unwrap_to::unwrap_to!(result.unwrap().unwrap() => ZomeCallResponse::Ok)
.decode()
.unwrap();
// Check alice's source chain contains the new value
let has_hash: bool = fresh_reader_test(alice_call_data.authored_env.clone(), |txn| {
txn.query_row(
"SELECT EXISTS(SELECT 1 FROM DhtOp WHERE header_hash = :hash)",
named_params! {
":hash": header_hash
},
|row| row.get(0),
)
.unwrap()
});
assert!(has_hash);
conductor_test.shutdown_conductor().await;
}
async fn install_new_app(
dna_name: &str,
zomes: Vec<TestWasm>,
handle: &ConductorHandle,
) -> CellId {
let dna_file = DnaFile::new(
DnaDef {
name: dna_name.to_string(),
uid: "ba1d046d-ce29-4778-914b-47e6010d2faf".to_string(),
properties: SerializedBytes::try_from(()).unwrap(),
zomes: zomes.clone().into_iter().map(Into::into).collect(),
},
zomes.into_iter().map(Into::into),
)
.await
.unwrap();
let bob_agent_id = fake_agent_pubkey_2();
let bob_cell_id = CellId::new(dna_file.dna_hash().to_owned(), bob_agent_id.clone());
let bob_installed_cell = InstalledCell::new(bob_cell_id.clone(), "bob_handle".into());
let cell_data = vec![(bob_installed_cell, None)];
install_app("bob_app", cell_data, vec![dna_file], handle.clone()).await;
bob_cell_id
}
}
| 37.153285 | 96 | 0.520432 |
6aa4f8cf01dd2ee5a122e49ebed07a4db90ec955 | 1,648 | use nix::unistd;
use std::io::Write;
use std::os::unix::io::RawFd;
use std::path::PathBuf;
use std::env;
use err_derive::Error;
#[derive(Debug, Error)]
pub enum Error {
#[error(display = "Error message: {:?}", _0)]
Message(String),
}
use std::error;
pub type Result<I> = std::result::Result<I, Box<dyn error::Error>>;
pub const SHELL_NAME: &'static str = "rushell";
/// A `File`-like wrapper around a raw `fd` that does not close it on drop.
pub struct FdFile {
fd: RawFd,
}
impl FdFile {
pub fn new(fd: RawFd) -> FdFile {
FdFile { fd }
}
pub fn fd(&self) -> RawFd {
self.fd
}
}
impl Write for FdFile {
#[inline]
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
let len = unistd::write(self.fd, buf).expect("failed to write");
Ok(len)
}
#[inline]
fn flush(&mut self) -> std::io::Result<()> {
unistd::fsync(self.fd).ok();
Ok(())
}
}
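// Usage sketch, assuming fd 1 is an open, writable descriptor (e.g. stdout):
//
//     let mut out = FdFile::new(1);
//     out.write_all(b"hello\n").ok();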
pub fn home_dir() -> PathBuf {
dirs::home_dir().unwrap_or(PathBuf::from("/"))
}
// pub fn current_working_dir() -> PathBuf {
// env::current_dir().unwrap_or(PathBuf::from("/"))
// }
pub fn var_os(env: &str, default: &str) -> String {
if let Some(value) = env::var_os(env) {
if let Ok(value) = value.into_string() {
return value;
}
}
default.to_owned()
}
pub fn home_dir_for_user(user: &str) -> Option<String> {
use nix::libc::getpwnam;
use std::ffi::{CString, CStr};
let user = CString::new(user).ok()?;
unsafe {
        let passwd = getpwnam(user.as_ptr());
        // `getpwnam` returns a null pointer when the user does not exist;
        // dereferencing it unchecked would be undefined behaviour.
        if passwd.is_null() {
            return None;
        }
        return Some(CStr::from_ptr((*passwd).pw_dir).to_str().ok()?.to_string());
}
}
| 21.128205 | 81 | 0.570388 |
8f54e40cfc76eb04416e7c581688415635d29dc6 | 2,192 | // Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
use hashbrown::HashSet;
use std::collections::VecDeque;
use petgraph::visit::{GraphBase, VisitMap, Visitable};
use petgraph::EdgeType;
use crate::StablePyGraph;
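/// Breadth-first search that ignores edge directions: returns the indices of
/// every node reachable from `start`, marking each one in `discovered`.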
pub fn bfs_undirected<Ty: EdgeType>(
graph: &StablePyGraph<Ty>,
start: <StablePyGraph<Ty> as GraphBase>::NodeId,
discovered: &mut <StablePyGraph<Ty> as Visitable>::Map,
) -> HashSet<usize> {
let mut component = HashSet::new();
component.insert(start.index());
let mut stack = VecDeque::new();
stack.push_front(start);
while let Some(node) = stack.pop_front() {
for succ in graph.neighbors_undirected(node) {
if discovered.visit(succ) {
stack.push_back(succ);
component.insert(succ.index());
}
}
}
component
}
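/// Returns the connected components of the graph (edges treated as
/// undirected), each component as a set of node indices.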
pub fn connected_components<Ty>(graph: &StablePyGraph<Ty>) -> Vec<HashSet<usize>>
where
Ty: EdgeType,
{
let mut conn_components = Vec::new();
let mut discovered = graph.visit_map();
for start in graph.node_indices() {
if !discovered.visit(start) {
continue;
}
let component = bfs_undirected(graph, start, &mut discovered);
conn_components.push(component)
}
conn_components
}
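/// Counts the connected components without materializing them.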
pub fn number_connected_components<Ty>(graph: &StablePyGraph<Ty>) -> usize
where
Ty: EdgeType,
{
let mut num_components = 0;
let mut discovered = graph.visit_map();
for start in graph.node_indices() {
if !discovered.visit(start) {
continue;
}
num_components += 1;
bfs_undirected(graph, start, &mut discovered);
}
num_components
}
| 27.4 | 81 | 0.659215 |
deb2828ff4a0b1b16b78e25cd9ffa5697650737c | 6,412 | use super::{HttpVerb, NewRule, Recipe, RecipeCascaded, Rule, RuleType};
use anyhow::{bail, format_err, Error, Result};
use serde_json::Value;
use std::convert::{TryFrom, TryInto};
use uuid::Uuid;
impl TryInto<shared::Recipe> for Recipe {
type Error = Error;
fn try_into(self) -> Result<shared::Recipe> {
let Recipe {
url,
payload,
id,
created_at,
updated_at,
} = self;
let id = Some(id);
let payload = serde_json::from_str(&payload)?;
let created_at = Some(created_at);
let updated_at = Some(updated_at);
Ok(shared::Recipe {
id,
url,
payload,
created_at,
updated_at,
..shared::Recipe::default()
})
}
}
impl TryInto<shared::Recipe> for RecipeCascaded {
type Error = Error;
fn try_into(self) -> Result<shared::Recipe> {
let Recipe {
id,
url,
payload,
created_at,
updated_at,
} = self.0;
let rules = self
.1
.into_iter()
.map(TryInto::try_into)
.collect::<Result<Vec<shared::Rule>>>()?;
let id = Some(id);
let payload: Value = serde_json::from_str(&payload)?;
let created_at = Some(created_at);
let updated_at = Some(updated_at);
Ok(shared::Recipe {
id,
url,
rules,
payload,
created_at,
updated_at,
})
}
}
impl TryInto<shared::Rule> for Rule {
type Error = Error;
fn try_into(self) -> Result<shared::Rule> {
use RuleType::*;
let Rule {
rule_type,
key_path,
subject,
http_method,
id,
..
} = self;
let id = Some(id);
Ok(match rule_type {
Authenticated => shared::Rule::Authenticated {
id,
key_path: key_path.ok_or_else(|| format_err!("Field, key_path, must be Some!"))?,
},
Subject => shared::Rule::Subject {
id,
subject: subject.ok_or_else(|| format_err!("Field, subject, must be Some!"))?,
},
HttpMethod => shared::Rule::HttpMethod {
id,
http_method: http_method
.map(Into::into)
.ok_or_else(|| format_err!("Field, http_method, must be Some!"))?,
},
})
}
}
impl TryFrom<(Uuid, shared::Rule)> for Rule {
type Error = Error;
fn try_from(t: (Uuid, shared::Rule)) -> Result<Self> {
use shared::Rule::*;
let (recipe_id, r) = t;
Ok(match r {
Authenticated { id, key_path } => {
let id = id.ok_or_else(|| format_err!("Rule must have an ID!"))?;
Self {
id,
recipe_id,
rule_type: RuleType::Authenticated,
subject: None,
key_path: Some(key_path),
http_method: None,
}
}
Subject { id, subject } => {
let id = id.ok_or_else(|| format_err!("Rule must have an ID!"))?;
Self {
id,
recipe_id,
rule_type: RuleType::Subject,
subject: Some(subject),
key_path: None,
http_method: None,
}
}
HttpMethod { id, http_method } => {
let id = id.ok_or_else(|| format_err!("Rule must have an ID!"))?;
Self {
id,
recipe_id,
rule_type: RuleType::HttpMethod,
subject: None,
key_path: None,
http_method: Some(http_method.into()),
}
}
})
}
}
impl From<(Uuid, shared::Rule)> for NewRule {
fn from(t: (Uuid, shared::Rule)) -> Self {
let (recipe_id, r) = t;
use shared::Rule::*;
match r {
Authenticated { key_path, .. } => Self {
recipe_id,
rule_type: RuleType::Authenticated,
subject: None,
key_path: Some(key_path),
http_method: None,
},
Subject { subject, .. } => Self {
recipe_id,
rule_type: RuleType::Subject,
subject: Some(subject),
key_path: None,
http_method: None,
},
HttpMethod { http_method, .. } => Self {
recipe_id,
rule_type: RuleType::HttpMethod,
subject: None,
key_path: None,
http_method: Some(http_method.into()),
},
}
}
}
impl From<shared::HttpVerb> for HttpVerb {
fn from(v: shared::HttpVerb) -> Self {
use shared::HttpVerb::*;
match v {
Get => HttpVerb::Get,
Post => HttpVerb::Post,
Put => HttpVerb::Put,
Delete => HttpVerb::Delete,
}
}
}
impl Into<shared::HttpVerb> for HttpVerb {
fn into(self) -> shared::HttpVerb {
use HttpVerb::*;
match self {
Get => shared::HttpVerb::Get,
Post => shared::HttpVerb::Post,
Put => shared::HttpVerb::Put,
Delete => shared::HttpVerb::Delete,
}
}
}
impl TryFrom<&str> for HttpVerb {
type Error = Error;
fn try_from(s: &str) -> Result<Self> {
use HttpVerb::*;
match s {
"Get" => Ok(Get),
"Post" => Ok(Post),
"Put" => Ok(Put),
"Delete" => Ok(Delete),
_ => bail!(
"{} is not a valid HTTP verb! For conversion from strings, case matters.",
s
),
}
}
}
impl TryFrom<&str> for RuleType {
type Error = Error;
fn try_from(s: &str) -> Result<Self> {
use RuleType::*;
match s {
"Authenticated" => Ok(Authenticated),
"Subject" => Ok(Subject),
"HttpMethod" => Ok(HttpMethod),
_ => bail!("{} is not a valid rule type!", s),
}
}
}
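// Hypothetical tests (not part of the original file): exercise the
// case-sensitive string conversions implemented above.
#[cfg(test)]
mod conversion_tests {
    use super::*;
    use std::convert::TryFrom;
    #[test]
    fn http_verb_from_str_is_case_sensitive() {
        assert!(HttpVerb::try_from("Get").is_ok());
        assert!(HttpVerb::try_from("get").is_err());
    }
    #[test]
    fn rule_type_from_str_rejects_unknown() {
        assert!(RuleType::try_from("HttpMethod").is_ok());
        assert!(RuleType::try_from("Unknown").is_err());
    }
}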
| 28.497778 | 97 | 0.449626 |
56553b563d3e8d79454ac279d93735e7f63fc540 | 2,268 | use crate::id::EmojiId;
// HACK: Hack needed until this is supported: https://github.com/serde-rs/serde/issues/368
fn false_default() -> bool {
false
}
#[cfg_attr(
feature = "serde-support",
derive(serde::Deserialize, serde::Serialize)
)]
#[cfg_attr(feature = "serde-support", serde(untagged))]
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum ReactionType {
Custom {
        #[cfg_attr(feature = "serde-support", serde(default = "false_default"))]
animated: bool,
        // Even though the docs say the id can be nil, that is a bit
        // misleading: it should only happen when the reaction is a unicode
        // emoji, and that case is caught by the other variant.
        id: EmojiId,
        // Name is nil if the emoji data is no longer available, for
        // example if the emoji has been deleted off the guild.
        name: Option<String>,
},
Unicode {
name: String,
},
}
#[cfg(test)]
mod tests {
use super::ReactionType;
use crate::id::EmojiId;
use serde_test::Token;
#[test]
fn test_custom() {
let kind = ReactionType::Custom {
animated: false,
id: EmojiId(1337),
name: Some("foo".to_owned()),
};
serde_test::assert_de_tokens(
&kind,
&[
Token::Struct {
name: "ReactionType",
len: 3,
},
Token::Str("animated"),
Token::Bool(false),
Token::Str("id"),
Token::Str("1337"),
Token::Str("name"),
Token::Some,
Token::Str("foo"),
Token::StructEnd,
],
);
}
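    // Hypothetical test (not in the original suite): when the `animated`
    // field is absent from the input, `false_default` should kick in
    // during deserialization.
    #[test]
    fn test_custom_missing_animated_defaults_to_false() {
        let kind = ReactionType::Custom {
            animated: false,
            id: EmojiId(1337),
            name: Some("foo".to_owned()),
        };
        serde_test::assert_de_tokens(
            &kind,
            &[
                Token::Struct {
                    name: "ReactionType",
                    len: 2,
                },
                Token::Str("id"),
                Token::Str("1337"),
                Token::Str("name"),
                Token::Some,
                Token::Str("foo"),
                Token::StructEnd,
            ],
        );
    }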
#[test]
fn test_unicode() {
let kind = ReactionType::Unicode {
name: "\u{1f643}".to_owned(),
};
serde_test::assert_de_tokens(
&kind,
&[
Token::Struct {
name: "ReactionType",
len: 2,
},
Token::Str("id"),
Token::None,
Token::Str("name"),
Token::Str("\u{1f643}"),
Token::StructEnd,
],
);
}
}
| 26.068966 | 90 | 0.47575 |
1d00cc6573a79657e3ed8f012cf6ae48aeea1fb4 | 5,673 | use rand::Rng;
pub const GRID_SIZE: usize = 32;
#[derive(PartialEq, Copy, Clone)]
pub enum TileType {
Wall,
Floor,
}
#[derive(Clone, Default)]
pub struct Map {
pub rows: usize,
pub columns: usize,
pub tiles: Vec<TileType>,
}
pub enum Orientation {
Horizontal,
Vertical,
}
/// Grid position, column by row
#[derive(PartialEq, Clone, Copy)]
pub struct GridDimension(pub usize);
#[derive(PartialEq, Clone, Copy)]
pub struct GridPosition(pub GridDimension, pub GridDimension);
#[derive(PartialEq, Clone, Copy)]
pub struct GridRectangle(pub GridPosition, pub GridDimension, pub GridDimension);
impl GridRectangle {
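    /// Note: this only checks whether `other`'s origin (top-left corner)
    /// falls inside `self`, not full rectangle overlap.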
    pub fn intersect(&self, other: &GridRectangle) -> bool {
let col = other.0.0;
let row = other.0.1;
(col.0 >= self.0.0.0 && col.0 <= self.0.0.0 + self.1.0) &&
(row.0 >= self.0.1.0 && row.0 <= self.0.1.0 + self.2.0)
}
    pub fn center(&self) -> GridPosition {
        GridPosition(
            (self.0.0 + self.1) / 2.into(),
            (self.0.1 + self.2) / 2.into(),
        )
    }
}
impl std::ops::Mul for GridDimension {
type Output = Self;
fn mul(self, rhs: Self) -> Self::Output {
GridDimension(self.0 * rhs.0)
}
}
impl std::ops::Add for GridDimension {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
GridDimension(self.0 + rhs.0)
}
}
impl std::ops::Div for GridDimension {
type Output = Self;
fn div(self, rhs: Self) -> Self::Output {
GridDimension(self.0 / rhs.0)
}
}
impl From<usize> for GridDimension {
fn from(item: usize) -> Self {
GridDimension(item)
}
}
/// Coordinate x, y
pub struct Coordinate(pub f32, pub f32);
impl Map {
pub fn grid_to_index(&self, position: GridPosition) -> usize {
(position.1.0 * self.columns) + position.0.0
}
}
pub fn grid_to_coordinates(position: GridPosition) -> Coordinate {
Coordinate(
(position.0.0 * GRID_SIZE) as f32,
(position.1.0 * GRID_SIZE) as f32
)
}
pub fn coordinate_to_grid(coordinate: Coordinate) -> GridPosition {
GridPosition(
GridDimension(coordinate.0 as usize / GRID_SIZE),
GridDimension(coordinate.1 as usize / GRID_SIZE)
)
}
/// Create a simple map width, height and number of random blocks
pub fn create_simple_map(width: usize, height: usize, blocks: usize, player: (usize, usize)) -> Result<Map, String> {
if width % GRID_SIZE != 0 || height % GRID_SIZE != 0 {
        return Err("Invalid dimensions: width and height must be divisible by 32".to_string());
}
let columns = width / GRID_SIZE;
let rows = height / GRID_SIZE;
// Add borders, row 0, row N, column 0, column N
let mut tiles = vec![TileType::Floor; columns * rows];
// Left and right border
for i in 0..rows {
tiles[i * columns] = TileType::Wall;
tiles[(i * columns) + (columns - 1)] = TileType::Wall;
}
// Top and bottom border
for i in 0..columns {
tiles[i] = TileType::Wall;
tiles[(rows * columns) - (i + 1)] = TileType::Wall;
}
// Generate random blocks
let mut rng = rand::thread_rng();
for _ in 0..blocks {
let column = rng.gen_range(1..columns - 1);
let row = rng.gen_range(1..rows - 1);
if column != player.0 || row != player.1 {
tiles[(columns * row) + column] = TileType::Wall;
}
}
let map = Map {
rows,
columns,
tiles
};
Ok(map)
}
fn add_room_to_map(room: GridRectangle, map: &mut Map) {
let rows = room.2.0;
let cols = room.1.0;
let pos = room.0;
for row in 0..rows {
let start_idx = map.grid_to_index(GridPosition(pos.0, pos.1 + GridDimension(row)));
for idx in start_idx..(start_idx + cols) {
map.tiles[idx] = TileType::Floor;
}
}
}
fn add_tunnel(orientation: Orientation, position: &GridPosition, length: GridDimension, map: &mut Map) {
match orientation {
Orientation::Horizontal => {
for col in 0..length.0 {
let idx = map.grid_to_index(GridPosition(position.0 + GridDimension(col), position.1));
map.tiles[idx] = TileType::Floor;
}
},
Orientation::Vertical => {
for row in 0..length.0 {
let idx = map.grid_to_index(GridPosition(position.0, position.1 + GridDimension(row)));
map.tiles[idx] = TileType::Floor;
}
}
}
}
pub fn create_map(width: usize, height: usize, player: (usize, usize)) -> Result<Map, String> {
if width % GRID_SIZE != 0 || height % GRID_SIZE != 0 {
        return Err("Invalid dimensions: width and height must be divisible by 32".to_string());
}
let columns = width / GRID_SIZE;
let rows = height / GRID_SIZE;
let tiles = vec![TileType::Wall; columns * rows];
let mut map = Map {
rows,
columns,
tiles
};
add_room_to_map(
GridRectangle(
GridPosition(GridDimension(3),GridDimension(3)),
GridDimension(5),
GridDimension(5)
), &mut map);
Ok(map)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_coordinate_to_grid() {
let coord = Coordinate(100.0, 100.0);
let grid = coordinate_to_grid(coord);
assert!(grid.0 == GridDimension(3) && grid.1 == GridDimension(3));
}
#[test]
fn test_grid_to_coordinate() {
let grid = GridPosition(GridDimension(7), GridDimension(7));
let coord = grid_to_coordinates(grid);
assert!(coord.0 == 224.0 && coord.1 == 224.0);
}
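    // Hypothetical tests (not in the original source): check the row-major
    // indexing used by `grid_to_index` and the integer midpoint returned by
    // `GridRectangle::center`.
    #[test]
    fn test_grid_to_index() {
        let map = Map {
            rows: 4,
            columns: 8,
            tiles: vec![TileType::Floor; 32],
        };
        let idx = map.grid_to_index(GridPosition(GridDimension(2), GridDimension(3)));
        assert_eq!(idx, (3 * 8) + 2);
    }
    #[test]
    fn test_grid_rectangle_center() {
        let rect = GridRectangle(
            GridPosition(GridDimension(3), GridDimension(3)),
            GridDimension(5),
            GridDimension(5),
        );
        let center = rect.center();
        assert!(center.0 == GridDimension(4) && center.1 == GridDimension(4));
    }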
} | 27.014286 | 117 | 0.58276 |
6949f963d1e05bca063fd31905d7a405543a00a6 | 7,379 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
pub name: sku::Name,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<sku::Tier>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub size: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub family: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub capacity: Option<i32>,
}
pub mod sku {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Name {
F0,
S1,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Tier {
Free,
Standard,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityRequestBody {
pub name: String,
#[serde(rename = "type")]
pub type_: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CheckNameAvailabilityResponseBody {
#[serde(rename = "nameAvailable", default, skip_serializing_if = "Option::is_none")]
pub name_available: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub reason: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
pub system_data: Option<SystemData>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TrackedResource {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
pub location: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HealthBot {
#[serde(flatten)]
pub tracked_resource: TrackedResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<HealthBotProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HealthBotProperties {
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "botManagementPortalLink", default, skip_serializing_if = "Option::is_none")]
pub bot_management_portal_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HealthBotUpdateParameters {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
#[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
pub created_by: Option<String>,
#[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")]
pub created_by_type: Option<IdentityType>,
#[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
#[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by: Option<String>,
#[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by_type: Option<IdentityType>,
#[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")]
pub last_modified_at: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IdentityType {
User,
Application,
ManagedIdentity,
Key,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ValidationResult {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Error {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<error::Error>,
}
pub mod error {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Error {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<Error>,
#[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")]
pub additional_info: Vec<ErrorAdditionalInfo>,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorAdditionalInfo {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub info: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailableOperations {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<OperationDetail>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationDetail {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "isDataAction", default, skip_serializing_if = "Option::is_none")]
pub is_data_action: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<OperationDisplay>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub origin: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationDisplay {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BotResponseList {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<HealthBot>,
}
| 40.994444 | 98 | 0.698198 |
e94a849f304ddaf9db50e37d292f04f0c34af6d9 | 1,221 | #![allow(non_snake_case)]
use curve25519_dalek::constants::RISTRETTO_BASEPOINT_COMPRESSED;
use curve25519_dalek::ristretto::{RistrettoPoint, CompressedRistretto};
use sha3::{Sha3_512};
pub const COMPRESSED_PEDERSEN_H: CompressedRistretto = CompressedRistretto([140, 146, 64, 180, 86, 169, 230, 220, 101, 195, 119, 161, 4, 141, 116, 95, 148, 160, 140, 219, 127, 68, 203, 205, 123, 70, 243, 64, 72, 135, 17, 52]);
pub const COMPRESSED_NATIVE: CompressedRistretto = CompressedRistretto([72, 2, 95, 153, 203, 254, 246, 104, 19, 19, 203, 9, 150, 245, 105, 42, 71, 184, 185, 77, 228, 204, 239, 66, 196, 171, 214, 194, 232, 253, 206, 21]);
pub fn PEDERSEN_H() -> RistrettoPoint {
RistrettoPoint::hash_from_bytes::<Sha3_512>(
RISTRETTO_BASEPOINT_COMPRESSED.as_bytes())
}
pub fn NATIVE() -> RistrettoPoint {
RistrettoPoint::hash_from_bytes::<Sha3_512>(
b"swapct_native".as_ref())
}
#[cfg(test)]
mod tests {
use super::{PEDERSEN_H, COMPRESSED_PEDERSEN_H, NATIVE, COMPRESSED_NATIVE};
#[test]
fn test_pedersen() {
assert_eq!(PEDERSEN_H().compress(), COMPRESSED_PEDERSEN_H);
}
#[test]
fn test_native() {
assert_eq!(NATIVE().compress(), COMPRESSED_NATIVE);
}
} | 37 | 226 | 0.686323 |
0ea6598a99f00afe7456963a18c0900a19a935bb | 6,264 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
use std::cast;
use syntax::crateid::CrateId;
use back::svh::Svh;
// EBML enum definitions and utils shared by the encoder and decoder
pub static tag_items: uint = 0x00;
pub static tag_paths_data_name: uint = 0x01;
pub static tag_def_id: uint = 0x02;
pub static tag_items_data: uint = 0x03;
pub static tag_items_data_item: uint = 0x04;
pub static tag_items_data_item_family: uint = 0x05;
pub static tag_items_data_item_ty_param_bounds: uint = 0x06;
pub static tag_items_data_item_type: uint = 0x07;
pub static tag_items_data_item_symbol: uint = 0x08;
pub static tag_items_data_item_variant: uint = 0x09;
pub static tag_items_data_parent_item: uint = 0x0a;
pub static tag_items_data_item_is_tuple_struct_ctor: uint = 0x0b;
pub static tag_index: uint = 0x0c;
pub static tag_index_buckets: uint = 0x0d;
pub static tag_index_buckets_bucket: uint = 0x0e;
pub static tag_index_buckets_bucket_elt: uint = 0x0f;
pub static tag_index_table: uint = 0x10;
pub static tag_meta_item_name_value: uint = 0x11;
pub static tag_meta_item_name: uint = 0x12;
pub static tag_meta_item_value: uint = 0x13;
pub static tag_attributes: uint = 0x14;
pub static tag_attribute: uint = 0x15;
pub static tag_meta_item_word: uint = 0x16;
pub static tag_meta_item_list: uint = 0x17;
// The list of crates that this crate depends on
pub static tag_crate_deps: uint = 0x18;
// A single crate dependency
pub static tag_crate_dep: uint = 0x19;
pub static tag_crate_hash: uint = 0x1a;
pub static tag_crate_crateid: uint = 0x1b;
pub static tag_crate_dep_crateid: uint = 0x1d;
pub static tag_crate_dep_hash: uint = 0x1e;
pub static tag_mod_impl: uint = 0x1f;
pub static tag_item_trait_method: uint = 0x20;
pub static tag_item_trait_ref: uint = 0x21;
pub static tag_item_super_trait_ref: uint = 0x22;
// discriminator value for variants
pub static tag_disr_val: uint = 0x23;
// used to encode ast_map::PathElem
pub static tag_path: uint = 0x24;
pub static tag_path_len: uint = 0x25;
pub static tag_path_elem_mod: uint = 0x26;
pub static tag_path_elem_name: uint = 0x27;
pub static tag_item_field: uint = 0x28;
pub static tag_item_field_origin: uint = 0x29;
pub static tag_item_variances: uint = 0x2a;
/*
trait items contain tag_item_trait_method elements,
impl items contain tag_item_impl_method elements, and classes
have both. That's because some code treats classes like traits,
and other code treats them like impls. Because classes can contain
both, tag_item_trait_method and tag_item_impl_method have to be two
different tags.
*/
pub static tag_item_impl_method: uint = 0x30;
pub static tag_item_trait_method_explicit_self: uint = 0x31;
// Reexports are found within module tags. Each reexport contains def_ids
// and names.
pub static tag_items_data_item_reexport: uint = 0x38;
pub static tag_items_data_item_reexport_def_id: uint = 0x39;
pub static tag_items_data_item_reexport_name: uint = 0x3a;
// used to encode crate_ctxt side tables
#[deriving(Eq)]
#[repr(uint)]
pub enum astencode_tag { // Reserves 0x40 -- 0x5f
tag_ast = 0x40,
tag_tree = 0x41,
tag_id_range = 0x42,
tag_table = 0x43,
tag_table_id = 0x44,
tag_table_val = 0x45,
tag_table_def = 0x46,
tag_table_node_type = 0x47,
tag_table_node_type_subst = 0x48,
tag_table_freevars = 0x49,
tag_table_tcache = 0x4a,
tag_table_param_defs = 0x4b,
tag_table_mutbl = 0x4c,
tag_table_last_use = 0x4d,
tag_table_spill = 0x4e,
tag_table_method_map = 0x4f,
tag_table_vtable_map = 0x50,
tag_table_adjustments = 0x51,
tag_table_moves_map = 0x52,
tag_table_capture_map = 0x53
}
static first_astencode_tag: uint = tag_ast as uint;
static last_astencode_tag: uint = tag_table_capture_map as uint;
impl astencode_tag {
pub fn from_uint(value : uint) -> Option<astencode_tag> {
let is_a_tag = first_astencode_tag <= value && value <= last_astencode_tag;
if !is_a_tag { None } else {
Some(unsafe { cast::transmute(value) })
}
}
}
pub static tag_item_trait_method_sort: uint = 0x60;
pub static tag_item_impl_type_basename: uint = 0x61;
// Language items are a top-level directory (for speed). Hierarchy:
//
// tag_lang_items
// - tag_lang_items_item
// - tag_lang_items_item_id: u32
// - tag_lang_items_item_node_id: u32
pub static tag_lang_items: uint = 0x70;
pub static tag_lang_items_item: uint = 0x71;
pub static tag_lang_items_item_id: uint = 0x72;
pub static tag_lang_items_item_node_id: uint = 0x73;
pub static tag_item_unnamed_field: uint = 0x74;
pub static tag_items_data_item_visibility: uint = 0x76;
pub static tag_items_data_item_sized: uint = 0x77;
pub static tag_item_method_tps: uint = 0x79;
pub static tag_item_method_fty: uint = 0x7a;
pub static tag_mod_child: uint = 0x7b;
pub static tag_misc_info: uint = 0x7c;
pub static tag_misc_info_crate_items: uint = 0x7d;
pub static tag_item_method_provided_source: uint = 0x7e;
pub static tag_item_impl_vtables: uint = 0x7f;
pub static tag_impls: uint = 0x80;
pub static tag_impls_impl: uint = 0x81;
pub static tag_items_data_item_inherent_impl: uint = 0x82;
pub static tag_items_data_item_extension_impl: uint = 0x83;
pub static tag_region_param_def: uint = 0x84;
pub static tag_region_param_def_ident: uint = 0x85;
pub static tag_region_param_def_def_id: uint = 0x86;
pub static tag_native_libraries: uint = 0x87;
pub static tag_native_libraries_lib: uint = 0x88;
pub static tag_native_libraries_name: uint = 0x89;
pub static tag_native_libraries_kind: uint = 0x8a;
pub static tag_macro_registrar_fn: uint = 0x8b;
pub static tag_exported_macros: uint = 0x8c;
pub static tag_macro_def: uint = 0x8d;
#[deriving(Clone, Show)]
pub struct LinkMeta {
pub crateid: CrateId,
pub crate_hash: Svh,
}
| 29.971292 | 83 | 0.766284 |
9c1a17fb820013b43a72befd6efe69870f0215ab | 4,937 | //! Provides a client-server networking architecture to amethyst.
#![warn(missing_docs, rust_2018_idioms, rust_2018_compatibility)]
pub use crate::{
bundle::NetworkBundle,
connection::{ConnectionState, NetConnection, NetIdentity},
error::Result,
net_event::{NetEvent, NetPacket},
network_socket::NetSocketSystem,
server::{Host, ServerConfig},
};
use std::net::SocketAddr;
use bincode::{deserialize, serialize};
use laminar::Packet;
use serde::{de::DeserializeOwned, Serialize};
mod bundle;
mod connection;
mod error;
mod net_event;
mod network_socket;
mod server;
mod test;
/// Attempts to serialize the given `NetEvent` and returns a laminar packet.
/// Reliable ordered will be used by default.
fn serialize_event<E>(event: NetEvent<E>, addr: SocketAddr) -> Result<Packet>
where
E: Serialize,
{
match serialize(&event) {
Ok(packet) => Ok(Packet::reliable_ordered(addr, packet, None)),
Err(e) => Err(e.into()),
}
}
/// Attempts to serialize the given packet and returns a laminar packet.
fn serialize_packet<T>(packet: NetPacket<T>, addr: SocketAddr) -> Result<Packet>
where
T: Serialize,
{
let ser = serialize(&packet.content());
match ser {
Ok(payload) => Ok(match packet.delivery_guarantee() {
net_event::DeliveryGuarantee::Unreliable => match packet.ordering_guarantee() {
net_event::OrderingGuarantee::None => Packet::unreliable(addr, payload),
net_event::OrderingGuarantee::Sequenced(s) => {
Packet::unreliable_sequenced(addr, payload, s)
}
_ => unreachable!(
"Can not apply the guarantees: {:?}, {:?} to the packet.",
packet.ordering_guarantee(),
packet.delivery_guarantee()
),
},
net_event::DeliveryGuarantee::Reliable => match packet.ordering_guarantee() {
net_event::OrderingGuarantee::None => Packet::reliable_unordered(addr, payload),
net_event::OrderingGuarantee::Sequenced(s) => {
Packet::reliable_sequenced(addr, payload, s)
}
net_event::OrderingGuarantee::Ordered(o) => {
Packet::reliable_ordered(addr, payload, o)
}
},
}),
Err(e) => Err(e.into()),
}
}
/// Attempts to deserialize an event from the raw byte data.
fn deserialize_event<T>(data: &[u8]) -> Result<T>
where
T: DeserializeOwned,
{
Ok(deserialize::<T>(data)?)
}
#[cfg(test)]
mod tests {
use crate::{deserialize_event, net_event::NetPacket, serialize_packet};
use laminar::{DeliveryGuarantee, OrderingGuarantee};
use std::net::SocketAddr;
#[test]
fn can_serialize_packets() {
let content = "abc".to_string();
let packet1 = NetPacket::reliable_unordered(content.clone());
let packet2 = NetPacket::reliable_ordered(content.clone(), None);
let packet3 = NetPacket::reliable_sequenced(content.clone(), None);
let packet4 = NetPacket::unreliable(content.clone());
let packet5 = NetPacket::unreliable_sequenced(content.clone(), None);
let addr: SocketAddr = "127.0.0.1:1234".parse().unwrap();
let serialized_packet1 = serialize_packet(packet1, addr).unwrap();
let serialized_packet2 = serialize_packet(packet2, addr).unwrap();
let serialized_packet3 = serialize_packet(packet3, addr).unwrap();
let serialized_packet4 = serialize_packet(packet4, addr).unwrap();
let serialized_packet5 = serialize_packet(packet5, addr).unwrap();
// assure correct guarantees
assert!(
serialized_packet1.delivery_guarantee() == DeliveryGuarantee::Reliable
&& serialized_packet1.order_guarantee() == OrderingGuarantee::None
);
assert!(
serialized_packet2.delivery_guarantee() == DeliveryGuarantee::Reliable
&& serialized_packet2.order_guarantee() == OrderingGuarantee::Ordered(None)
);
assert!(
serialized_packet3.delivery_guarantee() == DeliveryGuarantee::Reliable
&& serialized_packet3.order_guarantee() == OrderingGuarantee::Sequenced(None)
);
assert!(
serialized_packet4.delivery_guarantee() == DeliveryGuarantee::Unreliable
&& serialized_packet4.order_guarantee() == OrderingGuarantee::None
);
assert!(
serialized_packet5.delivery_guarantee() == DeliveryGuarantee::Unreliable
&& serialized_packet5.order_guarantee() == OrderingGuarantee::Sequenced(None)
);
}
#[test]
fn can_deserialize_event() {
let result =
deserialize_event::<NetPacket<String>>(&[3, 0, 0, 0, 0, 0, 0, 0, 97, 98, 99]).unwrap();
assert_eq!(result.content(), &"abc".to_string());
}
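    // Hypothetical round-trip check (not in the original tests): the payload
    // produced by `serialize_packet` should decode back to the original
    // content via `deserialize_event`.
    #[test]
    fn can_roundtrip_packet_payload() {
        let addr: SocketAddr = "127.0.0.1:1234".parse().unwrap();
        let packet = NetPacket::unreliable("abc".to_string());
        let serialized = serialize_packet(packet, addr).unwrap();
        let decoded = deserialize_event::<String>(serialized.payload()).unwrap();
        assert_eq!(decoded, "abc".to_string());
    }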
}
| 36.57037 | 99 | 0.630747 |
674059acb12817162d3611095142056ee76f2253 | 1,414 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::match_::MATCH_CONSTANTS;
use common::NamedItem;
use graphql_ir::{FragmentDefinition, OperationDefinition, Program, Transformed, Transformer};
/// A transform that removes split operations. Intended for use when e.g.
/// printing queries to send to a GraphQL server.
pub fn skip_split_operation<'s>(program: &Program<'s>) -> Program<'s> {
let mut transform = SkipSplitOperation {};
transform
.transform_program(program)
.replace_or_else(|| program.clone())
}
pub struct SkipSplitOperation;
impl Transformer for SkipSplitOperation {
const NAME: &'static str = "SkipSplitOperationTransform";
const VISIT_ARGUMENTS: bool = false;
const VISIT_DIRECTIVES: bool = false;
fn transform_operation(
&mut self,
operation: &OperationDefinition,
) -> Transformed<OperationDefinition> {
if operation
.directives
.named(MATCH_CONSTANTS.custom_module_directive_name)
.is_some()
{
Transformed::Delete
} else {
Transformed::Keep
}
}
fn transform_fragment(&mut self, _: &FragmentDefinition) -> Transformed<FragmentDefinition> {
Transformed::Keep
}
}
| 30.085106 | 97 | 0.675389 |
e545204ae1c688944355c0638295574b6984bdc3 | 1,062 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Project repository protected tags API endpoints.
//!
//! These endpoints are used for querying a project's protected tags.
mod protect;
mod protected_tag;
mod protected_tags;
mod unprotect;
pub use self::protect::ProtectTag;
pub use self::protect::ProtectTagBuilder;
pub use self::protect::ProtectTagBuilderError;
pub use self::unprotect::UnprotectTag;
pub use self::unprotect::UnprotectTagBuilder;
pub use self::unprotect::UnprotectTagBuilderError;
pub use self::protected_tag::ProtectedTag;
pub use self::protected_tag::ProtectedTagBuilder;
pub use self::protected_tag::ProtectedTagBuilderError;
pub use self::protected_tags::ProtectedTags;
pub use self::protected_tags::ProtectedTagsBuilder;
pub use self::protected_tags::ProtectedTagsBuilderError;
| 34.258065 | 69 | 0.79096 |
d6c8c1dd55bd3e0af2ee1e7d62d3b60b06e1c5c0 | 1,942 | #[doc = "Register `DC_LATCH0_TIME_POS[%s]` reader"]
pub struct R(crate::R<DC_LATCH0_TIME_POS_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<DC_LATCH0_TIME_POS_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<DC_LATCH0_TIME_POS_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<DC_LATCH0_TIME_POS_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Field `DC_LATCH0_TIME_POS` reader - Captures System time at the positive edge of the Latch0 signal"]
pub struct DC_LATCH0_TIME_POS_R(crate::FieldReader<u32, u32>);
impl DC_LATCH0_TIME_POS_R {
pub(crate) fn new(bits: u32) -> Self {
DC_LATCH0_TIME_POS_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for DC_LATCH0_TIME_POS_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl R {
#[doc = "Bits 0:31 - Captures System time at the positive edge of the Latch0 signal"]
#[inline(always)]
pub fn dc_latch0_time_pos(&self) -> DC_LATCH0_TIME_POS_R {
DC_LATCH0_TIME_POS_R::new((self.bits & 0xffff_ffff) as u32)
}
}
#[doc = "Register captures System time at the positive edge of the Latch0 signal\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [dc_latch0_time_pos](index.html) module"]
pub struct DC_LATCH0_TIME_POS_SPEC;
impl crate::RegisterSpec for DC_LATCH0_TIME_POS_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [dc_latch0_time_pos::R](R) reader structure"]
impl crate::Readable for DC_LATCH0_TIME_POS_SPEC {
type Reader = R;
}
#[doc = "`reset()` method sets DC_LATCH0_TIME_POS[%s] to value 0"]
impl crate::Resettable for DC_LATCH0_TIME_POS_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 35.962963 | 292 | 0.682286 |
758f91344460d60953a79f9f45d4b1cdb3ed245b | 1,860 | extern crate diesel;
extern crate dotenv;
extern crate kuchiki;
extern crate server;
use diesel::pg::PgConnection;
use diesel::prelude::*;
use dotenv::dotenv;
use kuchiki::traits::*;
use server::models::{get_author_link, Ad};
use server::schema::ads::*;
use server::schema::ads::dsl::*;
use server::start_logging;
use std::env;
use server::targeting_parser::{collect_advertiser, collect_targeting, Targeting};
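/// Picks an advertiser name for an ad: prefer the advertiser parsed out of
/// the targeting string, and fall back to the author link scraped from the
/// ad's HTML document.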
pub fn get_advertiser(targ: &Option<String>, document: &kuchiki::NodeRef) -> Option<String> {
match *targ {
Some(ref targ) => {
let targeting_advertiser = collect_advertiser(targ);
println!("{:?}", targeting_advertiser);
let author_link_advertiser = get_author_link(document).map(|a| a.text_contents()).ok();
println!("{:?}", author_link_advertiser);
targeting_advertiser.or(author_link_advertiser)
},
None => get_author_link(document).map(|a| a.text_contents()).ok(),
}
}
fn main() {
dotenv().ok();
start_logging();
let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
let conn = PgConnection::establish(&database_url).unwrap();
let dbads: Vec<Ad> = ads.order(created_at.desc())
.filter(advertiser.is_null())
.load::<Ad>(&conn)
.expect("Couldn't get ads.");
for ad in dbads {
println!("looking at {:?}", ad.id);
let document = kuchiki::parse_html().one(ad.html.clone());
let advertiser_name = get_advertiser(&ad.targeting, &document);
if advertiser_name.is_some() {
println!("found advertiser for {:?} {:?}", ad.id, advertiser_name.clone().unwrap());
diesel::update(ads.find(ad.id))
.set(advertiser.eq(advertiser_name.unwrap()))
.execute(&conn)
.unwrap();
}
}
}
| 35.09434 | 99 | 0.623118 |
22f11af0a107499b21e01d0223fbef9dd63e1ea7 | 1,609 | extern crate amethyst;
mod tile;
mod board;
mod systems;
mod png_loader;
use board::Board;
use amethyst::{
prelude::*,
renderer::{
DisplayConfig, DrawFlat, Pipeline,
PosTex, RenderBundle, Stage,
},
core::{
transform::TransformBundle,
frame_limiter::FrameRateLimitStrategy,
},
};
use std::time::Duration;
fn main() -> amethyst::Result<()> {
amethyst::start_logger(Default::default());
let path = "./resources/display_config.ron";
let config = DisplayConfig::load(&path);
let pipe = Pipeline::build().with_stage(
Stage::with_backbuffer()
.clear_target([0.0, 0.0, 0.0, 1.0], 1.0)
.with_pass(DrawFlat::<PosTex>::new()),
);
let binding_path = format!(
"{}/resources/bindings_config.ron",
env!("CARGO_MANIFEST_DIR")
);
use amethyst::input::InputBundle;
let input_bundle = InputBundle::<String, String>::new().with_bindings_from_file(binding_path)?;
let game_data = GameDataBuilder::default()
.with_bundle(TransformBundle::new())?
.with_bundle(RenderBundle::new(pipe, Some(config)))?
.with_bundle(input_bundle)?
        .with(systems::CursorSystem, "cursor_system", &["input_system"]);
let mut game = Application::build("./", Board::new(15,10))?
.with_frame_limit(
FrameRateLimitStrategy::Yield,
10,
).build(game_data)?;
//let mut game = Application::new("./", Board::new(15, 10), game_data)?;
game.run();
Ok(())
}
| 24.014925 | 99 | 0.608452 |
f7e521c700e63828e34ac71de6d0a57a729a5c34 | 12,554 | //! # Integration Tests Pallet
//!
//! ## Overview
//!
//! Integration Tests pallet is responsible for checking complex test cases with several pallets
//! involved.
//! Tests are split into different files depending on what pallet they are related to. There is also
//! a scenario_tests.rs file which isn't related to any particular pallet.
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(test)]
mod tests {
use controller::{ControllerData, PauseKeeper};
use frame_support::{
assert_noop, assert_ok, ord_parameter_types, pallet_prelude::GenesisBuild, parameter_types, PalletId,
};
use frame_system::{offchain::SendTransactionTypes, EnsureSignedBy};
use liquidity_pools::{PoolData, PoolUserData};
use minterest_model::MinterestModelData;
pub use minterest_primitives::currency::CurrencyType::{UnderlyingAsset, WrappedToken};
use minterest_primitives::{Balance, CurrencyId, Price, Rate};
use minterest_protocol::{Error as MinterestProtocolError, PoolInitData};
use orml_traits::{parameter_type_with_key, MultiCurrency};
use pallet_traits::{ControllerManager, CurrencyConverter, PoolsManager, PricesManager};
use sp_core::H256;
use sp_runtime::{
testing::{Header, TestXt},
traits::{AccountIdConversion, BlakeTwo256, IdentityLookup, One, Zero},
transaction_validity::TransactionPriority,
FixedPointNumber,
};
use sp_std::cell::RefCell;
use std::collections::HashMap;
use test_helper::*;
mod controller_tests;
mod liquidity_pools_tests;
mod minterest_model_tests;
mod minterest_protocol_tests;
mod mnt_token_tests;
mod scenario_tests;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
type Block = frame_system::mocking::MockBlock<Test>;
// Configure a mock runtime to test the pallet.
frame_support::construct_runtime!(
pub enum Test where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
Tokens: orml_tokens::{Pallet, Storage, Call, Event<T>, Config<T>},
Currencies: orml_currencies::{Pallet, Call, Event<T>},
MinterestProtocol: minterest_protocol::{Pallet, Storage, Call, Event<T>},
TestPools: liquidity_pools::{Pallet, Storage, Call, Config<T>},
TestLiquidationPools: liquidation_pools::{Pallet, Storage, Call, Event<T>, Config<T>},
TestController: controller::{Pallet, Storage, Call, Event, Config<T>},
TestMinterestModel: minterest_model::{Pallet, Storage, Call, Event, Config<T>},
TestDex: dex::{Pallet, Storage, Call, Event<T>},
TestMntToken: mnt_token::{Pallet, Storage, Call, Event<T>, Config<T>},
TestRiskManager: risk_manager::{Pallet, Storage, Call, Event<T>, Config<T>},
TestWhitelist: whitelist_module::{Pallet, Storage, Call, Event<T>, Config<T>},
}
);
ord_parameter_types! {
pub const ZeroAdmin: AccountId = 0;
}
parameter_types! {
pub const LiquidityPoolsPalletId: PalletId = PalletId(*b"lqdy/min");
pub const LiquidationPoolsPalletId: PalletId = PalletId(*b"lqdn/min");
pub const MntTokenPalletId: PalletId = PalletId(*b"min/mntt");
pub LiquidityPoolAccountId: AccountId = LiquidityPoolsPalletId::get().into_account();
pub LiquidationPoolAccountId: AccountId = LiquidationPoolsPalletId::get().into_account();
pub MntTokenAccountId: AccountId = MntTokenPalletId::get().into_account();
pub InitialExchangeRate: Rate = Rate::one();
pub EnabledUnderlyingAssetsIds: Vec<CurrencyId> = CurrencyId::get_enabled_tokens_in_protocol(UnderlyingAsset);
pub EnabledWrappedTokensId: Vec<CurrencyId> = CurrencyId::get_enabled_tokens_in_protocol(WrappedToken);
}
mock_impl_system_config!(Test);
mock_impl_balances_config!(Test);
mock_impl_orml_tokens_config!(Test);
mock_impl_orml_currencies_config!(Test);
mock_impl_liquidity_pools_config!(Test);
mock_impl_liquidation_pools_config!(Test);
mock_impl_controller_config!(Test, ZeroAdmin);
mock_impl_minterest_model_config!(Test, ZeroAdmin);
mock_impl_dex_config!(Test);
mock_impl_minterest_protocol_config!(Test, ZeroAdmin);
mock_impl_mnt_token_config!(Test, ZeroAdmin);
mock_impl_risk_manager_config!(Test, ZeroAdmin);
mock_impl_whitelist_module_config!(Test, ZeroAdmin);
thread_local! {
static UNDERLYING_PRICE: RefCell<HashMap<CurrencyId, Price>> = RefCell::new(
[
(DOT, Price::one()),
(ETH, Price::one()),
(BTC, Price::one()),
(KSM, Price::one()),
]
.iter()
.cloned()
.collect());
}
pub struct MockPriceSource;
impl MockPriceSource {
pub fn set_underlying_price(currency_id: CurrencyId, price: Price) {
UNDERLYING_PRICE.with(|v| v.borrow_mut().insert(currency_id, price));
}
}
impl PricesManager<CurrencyId> for MockPriceSource {
fn get_underlying_price(currency_id: CurrencyId) -> Option<Price> {
		UNDERLYING_PRICE.with(|v| v.borrow().get(&currency_id).copied())
}
fn lock_price(_currency_id: CurrencyId) {}
fn unlock_price(_currency_id: CurrencyId) {}
}
thread_local! {
static FOUR: RefCell<Vec<u64>> = RefCell::new(vec![4]);
}
pub struct ExtBuilder {
endowed_accounts: Vec<(AccountId, CurrencyId, Balance)>,
pools: Vec<(CurrencyId, PoolData)>,
pool_user_data: Vec<(CurrencyId, AccountId, PoolUserData)>,
minted_pools: Vec<(CurrencyId, Balance)>,
controller_data: Vec<(CurrencyId, ControllerData<BlockNumber>)>,
minterest_model_params: Vec<(CurrencyId, MinterestModelData)>,
mnt_claim_threshold: Balance,
liquidation_fee: Vec<(CurrencyId, Rate)>,
liquidation_threshold: Rate,
}
impl Default for ExtBuilder {
fn default() -> Self {
Self {
endowed_accounts: vec![],
pools: vec![],
pool_user_data: vec![],
minted_pools: vec![],
controller_data: vec![
(
DOT,
ControllerData {
last_interest_accrued_block: 0,
protocol_interest_factor: Rate::saturating_from_rational(1, 10),
max_borrow_rate: Rate::saturating_from_rational(5, 1000),
collateral_factor: Rate::saturating_from_rational(9, 10), // 90%
borrow_cap: None,
protocol_interest_threshold: PROTOCOL_INTEREST_TRANSFER_THRESHOLD,
},
),
(
ETH,
ControllerData {
last_interest_accrued_block: 0,
protocol_interest_factor: Rate::saturating_from_rational(1, 10),
max_borrow_rate: Rate::saturating_from_rational(5, 1000),
collateral_factor: Rate::saturating_from_rational(9, 10), // 90%
borrow_cap: None,
protocol_interest_threshold: PROTOCOL_INTEREST_TRANSFER_THRESHOLD,
},
),
(
BTC,
ControllerData {
last_interest_accrued_block: 0,
protocol_interest_factor: Rate::saturating_from_rational(1, 10),
max_borrow_rate: Rate::saturating_from_rational(5, 1000),
collateral_factor: Rate::saturating_from_rational(9, 10), // 90%
borrow_cap: None,
protocol_interest_threshold: PROTOCOL_INTEREST_TRANSFER_THRESHOLD,
},
),
],
minterest_model_params: vec![
(
DOT,
MinterestModelData {
kink: Rate::saturating_from_rational(8, 10),
base_rate_per_block: Rate::zero(),
multiplier_per_block: Rate::saturating_from_rational(9, 1_000_000_000), // 0.047304 PerYear
jump_multiplier_per_block: Rate::saturating_from_rational(207, 1_000_000_000), // 1.09 PerYear
},
),
(
ETH,
MinterestModelData {
kink: Rate::saturating_from_rational(8, 10),
base_rate_per_block: Rate::zero(),
multiplier_per_block: Rate::saturating_from_rational(9, 1_000_000_000), // 0.047304 PerYear
jump_multiplier_per_block: Rate::saturating_from_rational(207, 1_000_000_000), // 1.09 PerYear
},
),
(
BTC,
MinterestModelData {
kink: Rate::saturating_from_rational(8, 10),
base_rate_per_block: Rate::zero(),
multiplier_per_block: Rate::saturating_from_rational(9, 1_000_000_000), // 0.047304 PerYear
jump_multiplier_per_block: Rate::saturating_from_rational(207, 1_000_000_000), // 1.09 PerYear
},
),
],
mnt_claim_threshold: 0, // disable by default
liquidation_fee: vec![
(DOT, Rate::saturating_from_rational(5, 100)),
(ETH, Rate::saturating_from_rational(5, 100)),
(BTC, Rate::saturating_from_rational(5, 100)),
(KSM, Rate::saturating_from_rational(5, 100)),
],
liquidation_threshold: Rate::saturating_from_rational(3, 100),
}
}
}
impl ExtBuilder {
pub fn set_risk_manager_params(
mut self,
liquidation_fee: Vec<(CurrencyId, Rate)>,
liquidation_threshold: Rate,
) -> Self {
self.liquidation_fee = liquidation_fee;
self.liquidation_threshold = liquidation_threshold;
self
}
pub fn set_controller_data(mut self, pools: Vec<(CurrencyId, ControllerData<BlockNumber>)>) -> Self {
self.controller_data = pools;
self
}
pub fn set_minterest_model_params(mut self, pools: Vec<(CurrencyId, MinterestModelData)>) -> Self {
self.minterest_model_params = pools;
self
}
pub fn mnt_enabled_pools(mut self, pools: Vec<(CurrencyId, Balance)>) -> Self {
self.minted_pools = pools;
self
}
pub fn user_balance(mut self, user: AccountId, currency_id: CurrencyId, balance: Balance) -> Self {
self.endowed_accounts.push((user, currency_id, balance));
self
}
pub fn pool_balance(mut self, currency_id: CurrencyId, balance: Balance) -> Self {
self.endowed_accounts
.push((TestPools::pools_account_id(), currency_id, balance));
self
}
pub fn pool_borrow_underlying(mut self, pool_id: CurrencyId, borrowed: Balance) -> Self {
self.pools.push((
pool_id,
PoolData {
borrowed,
borrow_index: Rate::one(),
protocol_interest: Balance::zero(),
},
));
self
}
pub fn pool_user_data(
mut self,
pool_id: CurrencyId,
user: AccountId,
borrowed: Balance,
interest_index: Rate,
is_collateral: bool,
) -> Self {
self.pool_user_data.push((
pool_id,
user,
PoolUserData {
borrowed,
interest_index,
is_collateral,
},
));
self
}
pub fn pool_initial(mut self, pool_id: CurrencyId) -> Self {
self.pools.push((
pool_id,
PoolData {
borrowed: Balance::zero(),
borrow_index: Rate::one(),
protocol_interest: Balance::zero(),
},
));
self
}
pub fn mnt_account_balance(mut self, balance: Balance) -> Self {
self.endowed_accounts
.push((TestMntToken::get_account_id(), MNT, balance));
self
}
pub fn mnt_claim_threshold(mut self, threshold: Balance) -> Self {
self.mnt_claim_threshold = threshold;
self
}
pub fn build(self) -> sp_io::TestExternalities {
let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
pallet_balances::GenesisConfig::<Test> {
balances: self
.endowed_accounts
.clone()
.into_iter()
.filter(|(_, currency_id, _)| *currency_id == MNT)
.map(|(account_id, _, initial_balance)| (account_id, initial_balance))
.collect::<Vec<_>>(),
}
.assimilate_storage(&mut t)
.unwrap();
orml_tokens::GenesisConfig::<Test> {
balances: self
.endowed_accounts
.into_iter()
.filter(|(_, currency_id, _)| *currency_id != MNT)
.collect::<Vec<_>>(),
}
.assimilate_storage(&mut t)
.unwrap();
controller::GenesisConfig::<Test> {
controller_params: self.controller_data,
pause_keepers: vec![
(ETH, PauseKeeper::all_unpaused()),
(DOT, PauseKeeper::all_unpaused()),
(KSM, PauseKeeper::all_paused()),
(BTC, PauseKeeper::all_unpaused()),
],
}
.assimilate_storage(&mut t)
.unwrap();
liquidity_pools::GenesisConfig::<Test> {
pools: self.pools,
pool_user_data: self.pool_user_data,
}
.assimilate_storage(&mut t)
.unwrap();
minterest_model::GenesisConfig::<Test> {
minterest_model_params: self.minterest_model_params,
_phantom: Default::default(),
}
.assimilate_storage(&mut t)
.unwrap();
risk_manager::GenesisConfig::<Test> {
liquidation_fee: self.liquidation_fee,
liquidation_threshold: self.liquidation_threshold,
_phantom: Default::default(),
}
.assimilate_storage(&mut t)
.unwrap();
mnt_token::GenesisConfig::<Test> {
mnt_claim_threshold: self.mnt_claim_threshold,
minted_pools: self.minted_pools,
_phantom: Default::default(),
}
.assimilate_storage(&mut t)
.unwrap();
let mut ext = sp_io::TestExternalities::new(t);
ext.execute_with(|| System::set_block_number(1));
ext
}
}
}
| 31.862944 | 112 | 0.703282 |
5d8ac9cf5e21da257e02b69978ca47eb82a1bdda | 7,790 | // Copyright 2019 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Custom commands.
use std::any::Any;
use std::sync::Arc;
use crate::{WidgetId, WindowId};
/// An identifier for a particular command.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Selector(&'static str);
/// An arbitrary command.
///
/// A `Command` consists of a `Selector`, that indicates what the command is,
/// and an optional argument, that can be used to pass arbitrary data.
///
/// # Examples
/// ```
/// use druid::{Command, Selector};
///
/// let selector = Selector::new("process_rows");
/// let rows = vec![1, 3, 10, 12];
/// let command = Command::new(selector, rows);
///
/// assert_eq!(command.get_object(), Some(&vec![1, 3, 10, 12]));
/// ```
#[derive(Debug, Clone)]
pub struct Command {
/// The command's `Selector`.
pub selector: Selector,
object: Option<Arc<dyn Any>>,
}
/// The target of a command.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Target {
/// The target is a window; the event will be delivered to all
/// widgets in that window.
Window(WindowId),
/// The target is a specific widget.
Widget(WidgetId),
}
/// [`Command`]s with special meaning, defined by druid.
///
/// [`Command`]: struct.Command.html
pub mod sys {
use super::Selector;
/// Quit the running application. This command is handled by the druid library.
pub const QUIT_APP: Selector = Selector::new("druid-builtin.quit-app");
/// Hide the application. (mac only?)
pub const HIDE_APPLICATION: Selector = Selector::new("druid-builtin.menu-hide-application");
/// Hide all other applications. (mac only?)
pub const HIDE_OTHERS: Selector = Selector::new("druid-builtin.menu-hide-others");
/// The selector for a command to create a new window.
pub const NEW_WINDOW: Selector = Selector::new("druid-builtin.new-window");
/// The selector for a command to close a window. The command's argument
/// should be the id of the window to close.
pub const CLOSE_WINDOW: Selector = Selector::new("druid-builtin.close-window");
/// The selector for a command to bring a window to the front, and give it focus.
///
/// The command's argument should be the id of the target window.
pub const SHOW_WINDOW: Selector = Selector::new("druid-builtin.show-window");
    /// Display a context (right-click) menu. The argument must be the
    /// [`ContextMenu`] object to be displayed.
///
/// [`ContextMenu`]: ../struct.ContextMenu.html
pub const SHOW_CONTEXT_MENU: Selector = Selector::new("druid-builtin.show-context-menu");
/// The selector for a command to set the window's menu. The argument should
/// be a [`MenuDesc`] object.
///
/// [`MenuDesc`]: ../struct.MenuDesc.html
pub const SET_MENU: Selector = Selector::new("druid-builtin.set-menu");
/// Show the application preferences.
pub const SHOW_PREFERENCES: Selector = Selector::new("druid-builtin.menu-show-preferences");
/// Show the application about window.
pub const SHOW_ABOUT: Selector = Selector::new("druid-builtin.menu-show-about");
/// Show all applications.
pub const SHOW_ALL: Selector = Selector::new("druid-builtin.menu-show-all");
/// Show the new file dialog.
pub const NEW_FILE: Selector = Selector::new("druid-builtin.menu-file-new");
/// System command. A file picker dialog will be shown to the user, and an
/// `OPEN_FILE` command will be sent if a file is chosen.
///
/// The argument should be a [`FileDialogOptions`] struct.
///
/// [`FileDialogOptions`]: struct.FileDialogOptions.html
pub const SHOW_OPEN_PANEL: Selector = Selector::new("druid-builtin.menu-file-open");
/// Open a file.
///
/// The argument must be a [`FileInfo`] object for the file to be opened.
///
/// [`FileInfo`]: struct.FileInfo.html
pub const OPEN_FILE: Selector = Selector::new("druid-builtin.open-file-path");
/// Special command. When issued, the system will show the 'save as' panel,
/// and if a path is selected the system will issue a `SAVE_FILE` command
/// with the selected path as the argument.
///
/// The argument should be a [`FileDialogOptions`] object.
///
/// [`FileDialogOptions`]: struct.FileDialogOptions.html
pub const SHOW_SAVE_PANEL: Selector = Selector::new("druid-builtin.menu-file-save-as");
/// Save the current file.
///
/// The argument, if present, should be the path where the file should be saved.
pub const SAVE_FILE: Selector = Selector::new("druid-builtin.menu-file-save");
/// Show the print-setup window.
pub const PRINT_SETUP: Selector = Selector::new("druid-builtin.menu-file-print-setup");
/// Show the print dialog.
pub const PRINT: Selector = Selector::new("druid-builtin.menu-file-print");
/// Show the print preview.
    pub const PRINT_PREVIEW: Selector = Selector::new("druid-builtin.menu-file-print-preview");
/// Cut the current selection.
pub const CUT: Selector = Selector::new("druid-builtin.menu-cut");
/// Copy the current selection.
pub const COPY: Selector = Selector::new("druid-builtin.menu-copy");
/// Paste.
pub const PASTE: Selector = Selector::new("druid-builtin.menu-paste");
/// Undo.
pub const UNDO: Selector = Selector::new("druid-builtin.menu-undo");
/// Redo.
pub const REDO: Selector = Selector::new("druid-builtin.menu-redo");
}
impl Selector {
/// A selector that does nothing.
pub const NOOP: Selector = Selector::new("");
/// Create a new `Selector` with the given string.
pub const fn new(s: &'static str) -> Selector {
Selector(s)
}
}
impl Command {
/// Create a new `Command` with an argument. If you do not need
/// an argument, `Selector` implements `Into<Command>`.
pub fn new(selector: Selector, arg: impl Any) -> Self {
Command {
selector,
object: Some(Arc::new(arg)),
}
}
/// Return a reference to this command's object, if it has one.
pub fn get_object<T: Any>(&self) -> Option<&T> {
self.object.as_ref().and_then(|obj| obj.downcast_ref())
}
}
impl From<Selector> for Command {
fn from(selector: Selector) -> Command {
Command {
selector,
object: None,
}
}
}
impl std::fmt::Display for Selector {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Selector('{}')", self.0)
}
}
impl From<WindowId> for Target {
fn from(id: WindowId) -> Target {
Target::Window(id)
}
}
impl From<WidgetId> for Target {
fn from(id: WidgetId) -> Target {
Target::Widget(id)
}
}
impl Into<Option<Target>> for WindowId {
fn into(self) -> Option<Target> {
Some(Target::Window(self))
}
}
impl Into<Option<Target>> for WidgetId {
fn into(self) -> Option<Target> {
Some(Target::Widget(self))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn get_object() {
let sel = Selector::new("my-selector");
let objs = vec![0, 1, 2];
let command = Command::new(sel, objs);
assert_eq!(command.get_object(), Some(&vec![0, 1, 2]));
}
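    // Hypothetical test (not part of the original suite): a bare `Selector`
    // converted into a `Command` carries no object payload.
    #[test]
    fn selector_into_command_has_no_object() {
        let command: Command = Selector::new("my-selector").into();
        assert!(command.get_object::<u32>().is_none());
    }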
}
| 32.594142 | 96 | 0.647625 |
390aebf223d77a386b0c885645af4e5c25b0faa7 | 6,201 | use std::{
collections::VecDeque,
io::{self, Write},
pin::Pin,
task::{
Context,
Poll::{self, Pending, Ready},
},
};
use actix_codec::*;
use bytes::{Buf as _, BufMut as _, BytesMut};
use futures_sink::Sink;
use tokio_test::{assert_ready, task};
macro_rules! bilateral {
($($x:expr,)*) => {{
let mut v = VecDeque::new();
v.extend(vec![$($x),*]);
Bilateral { calls: v }
}};
}
macro_rules! assert_ready {
($e:expr) => {{
use core::task::Poll::*;
match $e {
Ready(v) => v,
Pending => panic!("pending"),
}
}};
($e:expr, $($msg:tt),+) => {{
use core::task::Poll::*;
match $e {
Ready(v) => v,
Pending => {
let msg = format_args!($($msg),+);
panic!("pending; {}", msg)
}
}
}};
}
#[derive(Debug)]
pub struct Bilateral {
pub calls: VecDeque<io::Result<Vec<u8>>>,
}
impl Write for Bilateral {
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
match self.calls.pop_front() {
Some(Ok(data)) => {
assert!(src.len() >= data.len());
assert_eq!(&data[..], &src[..data.len()]);
Ok(data.len())
}
Some(Err(err)) => Err(err),
None => panic!("unexpected write; {:?}", src),
}
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl AsyncWrite for Bilateral {
fn poll_write(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
match Pin::get_mut(self).write(buf) {
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Pending,
other => Ready(other),
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
match Pin::get_mut(self).flush() {
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Pending,
other => Ready(other),
}
}
fn poll_shutdown(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
unimplemented!()
}
}
impl AsyncRead for Bilateral {
fn poll_read(
mut self: Pin<&mut Self>,
_: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<Result<(), std::io::Error>> {
use io::ErrorKind::WouldBlock;
match self.calls.pop_front() {
Some(Ok(data)) => {
debug_assert!(buf.remaining() >= data.len());
buf.put_slice(&data);
Ready(Ok(()))
}
Some(Err(ref err)) if err.kind() == WouldBlock => Pending,
Some(Err(err)) => Ready(Err(err)),
None => Ready(Ok(())),
}
}
}
pub struct U32;
impl Encoder<u32> for U32 {
type Error = io::Error;
fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> {
// Reserve space
dst.reserve(4);
dst.put_u32(item);
Ok(())
}
}
impl Decoder for U32 {
type Item = u32;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u32>> {
if buf.len() < 4 {
return Ok(None);
}
let n = buf.split_to(4).get_u32();
Ok(Some(n))
}
}
#[test]
fn test_write_hits_highwater_mark() {
// see here for what this test is based on:
// https://github.com/tokio-rs/tokio/blob/75c07770bfbfea4e5fd914af819c741ed9c3fc36/tokio-util/tests/framed_write.rs#L69
const ITER: usize = 2 * 1024;
let mut bi = bilateral! {
Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")),
Ok(b"".to_vec()),
};
for i in 0..=ITER {
let mut b = BytesMut::with_capacity(4);
b.put_u32(i as u32);
// Append to the end
match bi.calls.back_mut().unwrap() {
Ok(ref mut data) => {
// Write in 2kb chunks
if data.len() < ITER {
data.extend_from_slice(&b[..]);
continue;
} // else fall through and create a new buffer
}
_ => unreachable!(),
}
        // Push a new chunk
bi.calls.push_back(Ok(b[..].to_vec()));
}
assert_eq!(bi.calls.len(), 6);
let mut framed = Framed::new(bi, U32);
let mut task = task::spawn(());
task.enter(|cx, _| {
// Send 8KB. This fills up Framed buffer
for i in 0..ITER {
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
}
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// write the buffer
assert!(framed.start_send(i as u32).is_ok());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// Now we poll_ready which forces a flush. The bilateral pops the front message
// and decides to block.
assert!(framed.poll_ready(cx).is_pending());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// We poll again, forcing another flush, which this time succeeds
// The whole 8KB buffer is flushed
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// Send more data. This matches the final message expected by the bilateral
assert!(framed.start_send(ITER as u32).is_ok());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// Flush the rest of the buffer
assert!(assert_ready!(framed.poll_flush(cx)).is_ok());
}
// Ensure the mock is empty
assert_eq!(0, Pin::new(&framed).get_ref().io_ref().calls.len());
});
}
| 27.438053 | 123 | 0.492985 |
1dbce1e3930119223809a5d117a985c6d831ffbe | 4,010 | use {
crate::parse_instruction::{
check_num_accounts, ParsableProgram, ParseInstructionError, ParsedInstructionEnum,
},
serde_json::json,
safecoin_sdk::{instruction::CompiledInstruction, pubkey::Pubkey},
};
// A helper function to convert safe_associated_token_account_v1_0::id(), which is an
// spl-sdk `Pubkey`, into a safecoin_sdk::pubkey::Pubkey
pub fn spl_associated_token_id_v1_0() -> Pubkey {
Pubkey::new_from_array(safe_associated_token_account_v1_0::id().to_bytes())
}
pub fn parse_associated_token(
instruction: &CompiledInstruction,
account_keys: &[Pubkey],
) -> Result<ParsedInstructionEnum, ParseInstructionError> {
match instruction.accounts.iter().max() {
Some(index) if (*index as usize) < account_keys.len() => {}
_ => {
// Runtime should prevent this from ever happening
return Err(ParseInstructionError::InstructionKeyMismatch(
ParsableProgram::SafeAssociatedTokenAccount,
));
}
}
check_num_associated_token_accounts(&instruction.accounts, 7)?;
Ok(ParsedInstructionEnum {
instruction_type: "create".to_string(),
info: json!({
"source": account_keys[instruction.accounts[0] as usize].to_string(),
"account": account_keys[instruction.accounts[1] as usize].to_string(),
"wallet": account_keys[instruction.accounts[2] as usize].to_string(),
"mint": account_keys[instruction.accounts[3] as usize].to_string(),
"systemProgram": account_keys[instruction.accounts[4] as usize].to_string(),
"tokenProgram": account_keys[instruction.accounts[5] as usize].to_string(),
"rentSysvar": account_keys[instruction.accounts[6] as usize].to_string(),
}),
})
}
fn check_num_associated_token_accounts(
accounts: &[u8],
num: usize,
) -> Result<(), ParseInstructionError> {
check_num_accounts(accounts, num, ParsableProgram::SafeAssociatedTokenAccount)
}
#[cfg(test)]
mod test {
use {
super::*,
safe_associated_token_account_v1_0::{
create_associated_token_account,
safecoin_program::{
instruction::CompiledInstruction as SplAssociatedTokenCompiledInstruction,
message::Message, pubkey::Pubkey as SplAssociatedTokenPubkey,
},
},
};
fn convert_pubkey(pubkey: Pubkey) -> SplAssociatedTokenPubkey {
SplAssociatedTokenPubkey::new_from_array(pubkey.to_bytes())
}
fn convert_compiled_instruction(
instruction: &SplAssociatedTokenCompiledInstruction,
) -> CompiledInstruction {
CompiledInstruction {
program_id_index: instruction.program_id_index,
accounts: instruction.accounts.clone(),
data: instruction.data.clone(),
}
}
#[test]
fn test_parse_associated_token() {
let mut keys: Vec<Pubkey> = vec![];
for _ in 0..7 {
keys.push(safecoin_sdk::pubkey::new_rand());
}
let create_ix = create_associated_token_account(
&convert_pubkey(keys[0]),
&convert_pubkey(keys[1]),
&convert_pubkey(keys[2]),
);
let message = Message::new(&[create_ix], None);
let compiled_instruction = convert_compiled_instruction(&message.instructions[0]);
assert_eq!(
parse_associated_token(&compiled_instruction, &keys).unwrap(),
ParsedInstructionEnum {
instruction_type: "create".to_string(),
info: json!({
"source": keys[0].to_string(),
"account": keys[1].to_string(),
"wallet": keys[2].to_string(),
"mint": keys[3].to_string(),
"systemProgram": keys[4].to_string(),
"tokenProgram": keys[5].to_string(),
"rentSysvar": keys[6].to_string(),
})
}
);
}
}
| 37.12963 | 99 | 0.620698 |
5602e54bd42e264be6147f7b8e51bfc4b1f6d262 | 534 | use vial::Method;
#[test]
fn converts_from_str() {
assert_eq!(Method::from("GET"), Method::GET);
assert_eq!(Method::from("HEAD"), Method::HEAD);
assert_eq!(Method::from("POST"), Method::POST);
assert_eq!(Method::from("PUT"), Method::PUT);
assert_eq!(Method::from("DELETE"), Method::DELETE);
assert_eq!(Method::from("PATCH"), Method::PATCH);
assert_eq!(Method::from("OPTIONS"), Method::OPTIONS);
assert_eq!(Method::from("TRACE"), Method::TRACE);
assert_eq!(Method::from("INVALID"), Method::GET);
}
| 35.6 | 57 | 0.646067 |
092bf00ddd6a4fd91bd309a8264eb9417ead3ae2 | 1,004 | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
use std::mem;
trait Trait1<T> {}
trait Trait2<'a> {
type Ty;
}
fn _ice(param: Box<for <'a> Trait1<<() as Trait2<'a>>::Ty>>) {
let _e: (usize, usize) = unsafe{mem::transmute(param)};
}
trait Lifetime<'a> {
type Out;
}
impl<'a> Lifetime<'a> for () {
type Out = &'a ();
}
fn foo<'a>(x: &'a ()) -> <() as Lifetime<'a>>::Out {
x
}
fn takes_lifetime(_f: for<'a> fn(&'a ()) -> <() as Lifetime<'a>>::Out) {
}
#[rustc_error]
fn main() { //~ ERROR compilation successful
takes_lifetime(foo);
}
| 24.487805 | 72 | 0.643426 |
115ac469d1d4e71977f05f35a406e0f4d8daa56d | 855 | use ron::value::Value;
use serde::Serialize;
fn main() {
let data = r#"
Scene( // class name is optional
materials: { // this is a map
"metal": (
reflectivity: 1.0,
),
"plastic": (
reflectivity: 0.5,
),
},
entities: [ // this is an array
(
name: "hero",
material: "metal",
),
(
name: "monster",
material: "plastic",
),
],
)
"#;
let value: Value = data.parse().expect("Failed to deserialize");
let mut ser = serde_json::Serializer::pretty(std::io::stdout());
value.serialize(&mut ser).expect("Failed to serialize");
}
| 26.71875 | 68 | 0.390643 |
097eb74f2d6569792a2cfd4a36a8a3575636cd50 | 22,426 | //! Working with GPIO pins.
//! The pins are associated with the PORT hardware. This module
//! defines a `split` method on the `PORT` type that is used to safely
//! reference the individual pin configuration.
//! The IO pins can be switched into alternate function modes, which
//! route the pins to different peripherals depending on the mode
//! for the pin. The pin configuration is reflected through the
//! use of type states to make the interface (ideally, or at least practically)
//! impossible to misuse.
use crate::target_device::port::group::{DIRCLR, DIRSET, OUTCLR, OUTSET, PINCFG, PMUX};
use crate::target_device::PORT;
use core::marker::PhantomData;
use hal::digital::v2::OutputPin;
#[cfg(feature = "unproven")]
use hal::digital::v2::{InputPin, StatefulOutputPin, ToggleableOutputPin};
/// The GpioExt trait allows splitting the PORT hardware into
/// its constituent pin parts.
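///
/// A minimal usage sketch (`Peripherals::take` and the pin choice are
/// assumptions about the surrounding application, not fixed by this crate):
///
/// ```ignore
/// let mut peripherals = target_device::Peripherals::take().unwrap();
/// let mut pins = peripherals.PORT.split();
/// let mut led = pins.pa17.into_push_pull_output(&mut pins.port);
/// led.set_high().unwrap();
/// ```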
pub trait GpioExt {
type Parts;
	/// Consume and split the device into its constituent parts
fn split(self) -> Self::Parts;
}
/// Represents a pin configured for input.
/// The MODE type is typically one of `Floating`, `PullDown` or
/// `PullUp`.
pub struct Input<MODE> {
_mode: PhantomData<MODE>,
}
/// Represents a pin configured for output.
/// The MODE type is typically one of `PushPull`, or
/// `OpenDrain`.
pub struct Output<MODE> {
_mode: PhantomData<MODE>,
}
// The following collection of types is used to encode the
// state of the pin at compile time and helps to avoid misuse.
/// Floating Input
pub struct Floating;
/// Pulled down Input
pub struct PullDown;
/// Pulled up Input
pub struct PullUp;
/// Totem Pole aka Push-Pull
pub struct PushPull;
/// Open drain output
pub struct OpenDrain;
/// Open drain output, which can be read when not driven
pub struct ReadableOpenDrain;
/// Peripheral Function A
pub struct PfA;
/// Peripheral Function B
pub struct PfB;
/// Peripheral Function C
pub struct PfC;
/// Peripheral Function D
pub struct PfD;
/// Peripheral Function E
pub struct PfE;
/// Peripheral Function F
pub struct PfF;
/// Peripheral Function G
pub struct PfG;
/// Peripheral Function H
pub struct PfH;
/// Peripheral Function I
pub struct PfI;
/// Peripheral Function J
pub struct PfJ;
/// Peripheral Function K
pub struct PfK;
/// Peripheral Function L
pub struct PfL;
/// Peripheral Function M
pub struct PfM;
/// Peripheral Function N
pub struct PfN;
/// A trait that makes it easier to generically manage
/// converting a pin from its current state into some
/// other functional mode. The configuration change
/// requires exclusive access to the Port hardware,
/// which is why this isn't simply the standard `Into`
/// trait.
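///
/// A minimal sketch (the pin and target function here are assumptions):
///
/// ```ignore
/// // Route pa8 to peripheral function C (e.g. a SERCOM pad):
/// let pad = pins.pa8.into_function_c(&mut pins.port);
/// // Or generically, through this trait:
/// fn claim<T, P: IntoFunction<T>>(pin: P, port: &mut Port) -> T {
///     pin.into_function(port)
/// }
/// ```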
pub trait IntoFunction<T> {
/// Consume the pin and configure it to operate in
/// the mode T.
fn into_function(self, port: &mut Port) -> T;
}
// rustfmt wants to keep indenting the nested macro on each run,
// so disable it for this whole block :-/
#[rustfmt::skip]
macro_rules! pin {
(
$PinType:ident,
$pin_ident:ident,
$pin_no:expr,
$group:ident,
$dirset:ident,
$dirclr:ident,
$pincfg:ident,
$outset:ident,
$outclr:ident,
$pinmux:ident,
$out:ident
) => {
// Helper for pmux peripheral function configuration
macro_rules! function {
($FuncType:ty, $func_ident:ident, $variant:expr) => {
impl<MODE> $PinType<MODE> {
/// Configures the pin to operate with a peripheral
pub fn $func_ident(
self,
port: &mut Port
) -> $PinType<$FuncType> {
port.$pinmux()[$pin_no >> 1].modify(|_, w| {
if $pin_no & 1 == 1 {
// Odd-numbered pin
unsafe { w.pmuxo().bits($variant) }
} else {
// Even-numbered pin
unsafe { w.pmuxe().bits($variant) }
}
});
port.$pincfg()[$pin_no].modify(|_, bits| {
bits.pmuxen().set_bit()
});
$PinType { _mode: PhantomData }
}
}
impl<MODE> IntoFunction<$PinType<$FuncType>> for $PinType<MODE> {
fn into_function(self, port: &mut Port) -> $PinType<$FuncType> {
self.$func_ident(port)
}
}
};
}
/// Represents the IO pin with the matching name.
pub struct $PinType<MODE> {
_mode: PhantomData<MODE>,
}
function!(PfA, into_function_a, 0);
function!(PfB, into_function_b, 1);
function!(PfC, into_function_c, 2);
function!(PfD, into_function_d, 3);
function!(PfE, into_function_e, 4);
function!(PfF, into_function_f, 5);
function!(PfG, into_function_g, 6);
function!(PfH, into_function_h, 7);
function!(PfI, into_function_i, 8);
function!(PfJ, into_function_j, 9);
function!(PfK, into_function_k, 10);
function!(PfL, into_function_l, 11);
function!(PfM, into_function_m, 12);
function!(PfN, into_function_n, 13);
impl<MODE> $PinType<MODE> {
/// Configures the pin to operate as a floating input
pub fn into_floating_input(self, port: &mut Port) -> $PinType<Input<Floating>> {
port.$dirclr().write(|bits| unsafe {
bits.bits(1 << $pin_no);
bits
});
port.$pincfg()[$pin_no].write(|bits| {
bits.pmuxen().clear_bit();
bits.inen().set_bit();
bits.pullen().clear_bit();
bits.drvstr().clear_bit();
bits
});
$PinType { _mode: PhantomData }
}
/// Configures the pin to operate as a pulled down input pin
pub fn into_pull_down_input(self, port: &mut Port) -> $PinType<Input<PullDown>> {
port.$dirclr().write(|bits| unsafe {
bits.bits(1 << $pin_no);
bits
});
port.$pincfg()[$pin_no].write(|bits| {
bits.pmuxen().clear_bit();
bits.inen().set_bit();
bits.pullen().set_bit();
bits.drvstr().clear_bit();
bits
});
// Pull down
port.$outclr().write(|bits| unsafe {
bits.bits(1 << $pin_no);
bits
});
$PinType { _mode: PhantomData }
}
/// Configures the pin to operate as a pulled up input pin
pub fn into_pull_up_input(self, port: &mut Port) -> $PinType<Input<PullUp>> {
port.$dirclr().write(|bits| unsafe {
bits.bits(1 << $pin_no);
bits
});
port.$pincfg()[$pin_no].write(|bits| {
bits.pmuxen().clear_bit();
bits.inen().set_bit();
bits.pullen().set_bit();
bits.drvstr().clear_bit();
bits
});
// Pull up
port.$outset().write(|bits| unsafe {
bits.bits(1 << $pin_no);
bits
});
$PinType { _mode: PhantomData }
}
/// Configures the pin to operate as an open drain output
pub fn into_open_drain_output(self, port: &mut Port) -> $PinType<Output<OpenDrain>> {
port.$dirset().write(|bits| unsafe {
bits.bits(1 << $pin_no);
bits
});
port.$pincfg()[$pin_no].write(|bits| {
bits.pmuxen().clear_bit();
bits.inen().clear_bit();
bits.pullen().clear_bit();
bits.drvstr().clear_bit();
bits
});
$PinType { _mode: PhantomData }
}
/// Configures the pin to operate as an open drain output which can be read
pub fn into_readable_open_drain_output(self, port: &mut Port) -> $PinType<Output<ReadableOpenDrain>> {
port.$dirset().write(|bits| unsafe {
bits.bits(1 << $pin_no);
bits
});
port.$pincfg()[$pin_no].write(|bits| {
bits.pmuxen().clear_bit();
bits.inen().set_bit();
bits.pullen().clear_bit();
bits.drvstr().clear_bit();
bits
});
$PinType { _mode: PhantomData }
}
/// Configures the pin to operate as a push-pull output
pub fn into_push_pull_output(self, port: &mut Port) -> $PinType<Output<PushPull>> {
port.$dirset().write(|bits| unsafe {
bits.bits(1 << $pin_no);
bits
});
port.$pincfg()[$pin_no].write(|bits| {
bits.pmuxen().clear_bit();
bits.inen().set_bit();
bits.pullen().clear_bit();
bits.drvstr().clear_bit();
bits
});
$PinType { _mode: PhantomData }
}
}
impl $PinType<Output<OpenDrain>> {
/// Control state of the internal pull up
pub fn internal_pull_up(&mut self, port: &mut Port, on: bool) {
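            // Note: `write` resets this pin's other PINCFG fields to their
            // defaults; only PULLEN ends up set (or cleared) as requested.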
port.$pincfg()[$pin_no].write(|bits| {
if on {
bits.pullen().set_bit();
} else {
bits.pullen().clear_bit();
}
bits
});
}
}
impl<MODE> $PinType<Output<MODE>> {
/// Toggle the logic level of the pin; if it is currently
/// high, set it low and vice versa.
pub fn toggle(&mut self) {
self.toggle_impl();
}
fn toggle_impl(&mut self) {
unsafe {
(*PORT::ptr()).$group.outtgl.write(|bits| {
bits.bits(1 << $pin_no);
bits
});
}
}
}
#[cfg(feature = "unproven")]
impl<MODE> ToggleableOutputPin for $PinType<Output<MODE>> {
// TODO: switch to ! when it’s stable
type Error = ();
fn toggle(&mut self) -> Result<(), Self::Error> {
self.toggle_impl();
Ok(())
}
}
#[cfg(feature = "unproven")]
impl InputPin for $PinType<Output<ReadableOpenDrain>> {
// TODO: switch to ! when it’s stable
type Error = ();
fn is_high(&self) -> Result<bool, Self::Error> {
Ok(unsafe { (((*PORT::ptr()).$group.in_.read().bits()) & (1 << $pin_no)) != 0 })
}
fn is_low(&self) -> Result<bool, Self::Error> {
Ok(unsafe { (((*PORT::ptr()).$group.in_.read().bits()) & (1 << $pin_no)) == 0 })
}
}
#[cfg(feature = "unproven")]
impl<MODE> InputPin for $PinType<Input<MODE>> {
// TODO: switch to ! when it’s stable
type Error = ();
fn is_high(&self) -> Result<bool, Self::Error> {
Ok(unsafe { (((*PORT::ptr()).$group.in_.read().bits()) & (1 << $pin_no)) != 0 })
}
fn is_low(&self) -> Result<bool, Self::Error> {
Ok(unsafe { (((*PORT::ptr()).$group.in_.read().bits()) & (1 << $pin_no)) == 0 })
}
}
#[cfg(feature = "unproven")]
impl<MODE> StatefulOutputPin for $PinType<Output<MODE>> {
fn is_set_high(&self) -> Result<bool, Self::Error> {
Ok(unsafe { (((*PORT::ptr()).$group.out.read().bits()) & (1 << $pin_no)) != 0 })
}
fn is_set_low(&self) -> Result<bool, Self::Error> {
Ok(unsafe { (((*PORT::ptr()).$group.out.read().bits()) & (1 << $pin_no)) == 0 })
}
}
impl<MODE> OutputPin for $PinType<Output<MODE>> {
// TODO: switch to ! when it’s stable
type Error = ();
fn set_high(&mut self) -> Result<(), Self::Error> {
unsafe {
(*PORT::ptr()).$group.outset.write(|bits| {
bits.bits(1 << $pin_no);
bits
});
}
Ok(())
}
fn set_low(&mut self) -> Result<(), Self::Error> {
unsafe {
(*PORT::ptr()).$group.outclr.write(|bits| {
bits.bits(1 << $pin_no);
bits
});
}
Ok(())
}
}
};
}
/// Opaque port reference
pub struct Port {
_0: (),
}
impl Port {
fn dirset0(&mut self) -> &DIRSET {
unsafe { &(*PORT::ptr()).group0.dirset }
}
fn dirclr0(&mut self) -> &DIRCLR {
unsafe { &(*PORT::ptr()).group0.dirclr }
}
fn pincfg0(&mut self) -> &[PINCFG; 32] {
unsafe { &(*PORT::ptr()).group0.pincfg }
}
fn outset0(&mut self) -> &OUTSET {
unsafe { &(*PORT::ptr()).group0.outset }
}
fn outclr0(&mut self) -> &OUTCLR {
unsafe { &(*PORT::ptr()).group0.outclr }
}
fn pmux0(&mut self) -> &[PMUX; 16] {
unsafe { &(*PORT::ptr()).group0.pmux }
}
fn dirset1(&mut self) -> &DIRSET {
unsafe { &(*PORT::ptr()).group1.dirset }
}
fn dirclr1(&mut self) -> &DIRCLR {
unsafe { &(*PORT::ptr()).group1.dirclr }
}
fn pincfg1(&mut self) -> &[PINCFG; 32] {
unsafe { &(*PORT::ptr()).group1.pincfg }
}
fn outset1(&mut self) -> &OUTSET {
unsafe { &(*PORT::ptr()).group1.outset }
}
fn outclr1(&mut self) -> &OUTCLR {
unsafe { &(*PORT::ptr()).group1.outclr }
}
fn pmux1(&mut self) -> &[PMUX; 16] {
unsafe { &(*PORT::ptr()).group1.pmux }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51n20a", feature = "samd51p20a", feature = "same54"))]
fn dirset2(&mut self) -> &DIRSET {
unsafe { &(*PORT::ptr()).group2.dirset }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51n20a", feature = "samd51p20a", feature = "same54"))]
fn dirclr2(&mut self) -> &DIRCLR {
unsafe { &(*PORT::ptr()).group2.dirclr }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51n20a", feature = "samd51p20a", feature = "same54"))]
fn pincfg2(&mut self) -> &[PINCFG; 32] {
unsafe { &(*PORT::ptr()).group2.pincfg }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51n20a", feature = "samd51p20a", feature = "same54"))]
fn outset2(&mut self) -> &OUTSET {
unsafe { &(*PORT::ptr()).group2.outset }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51n20a", feature = "samd51p20a", feature = "same54"))]
fn outclr2(&mut self) -> &OUTCLR {
unsafe { &(*PORT::ptr()).group2.outclr }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51n20a", feature = "samd51p20a", feature = "same54"))]
fn pmux2(&mut self) -> &[PMUX; 16] {
unsafe { &(*PORT::ptr()).group2.pmux }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51p20a", feature = "same54"))]
fn dirset3(&mut self) -> &DIRSET {
unsafe { &(*PORT::ptr()).group3.dirset }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51p20a", feature = "same54"))]
fn dirclr3(&mut self) -> &DIRCLR {
unsafe { &(*PORT::ptr()).group3.dirclr }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51p20a", feature = "same54"))]
fn pincfg3(&mut self) -> &[PINCFG; 32] {
unsafe { &(*PORT::ptr()).group3.pincfg }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51p20a", feature = "same54"))]
fn outset3(&mut self) -> &OUTSET {
unsafe { &(*PORT::ptr()).group3.outset }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51p20a", feature = "same54"))]
fn outclr3(&mut self) -> &OUTCLR {
unsafe { &(*PORT::ptr()).group3.outclr }
}
#[cfg(any(feature = "samd51p19a", feature = "samd51p20a", feature = "same54"))]
fn pmux3(&mut self) -> &[PMUX; 16] {
unsafe { &(*PORT::ptr()).group3.pmux }
}
}
macro_rules! port {
([
$($PinTypeA:ident: ($pin_identA:ident, $pin_noA:expr),)+
],[
$($PinTypeB:ident: ($pin_identB:ident, $pin_noB:expr),)+
],[
$($PinTypeC:ident: ($pin_identC:ident, $pin_noC:expr),)+
],[
$($PinTypeD:ident: ($pin_identD:ident, $pin_noD:expr),)+
]) => {
/// Holds the GPIO Port peripheral and broken out pin instances
pub struct Parts {
/// Opaque port reference
pub port: Port,
$(
/// Pin $pin_identA
pub $pin_identA: $PinTypeA<Input<Floating>>,
)+
$(
/// Pin $pin_identB
pub $pin_identB: $PinTypeB<Input<Floating>>,
)+
$(
/// Pin $pin_identC
#[cfg(any(feature = "samd51p19a", feature = "samd51n20a", feature = "samd51p20a", feature = "same54"))]
pub $pin_identC: $PinTypeC<Input<Floating>>,
)+
$(
/// Pin $pin_identD
#[cfg(any(feature = "samd51p19a", feature = "samd51p20a", feature = "same54"))]
pub $pin_identD: $PinTypeD<Input<Floating>>,
)+
}
impl GpioExt for PORT {
type Parts = Parts;
/// Split the PORT peripheral into discrete pins
fn split(self) -> Parts {
Parts {
port: Port {_0: ()},
$(
$pin_identA: $PinTypeA { _mode: PhantomData },
)+
$(
$pin_identB: $PinTypeB { _mode: PhantomData },
)+
$(
#[cfg(any(feature = "samd51p19a", feature = "samd51n20a", feature = "samd51p20a", feature = "same54"))]
$pin_identC: $PinTypeC { _mode: PhantomData },
)+
$(
#[cfg(any(feature = "samd51p19a", feature = "samd51p20a", feature = "same54"))]
$pin_identD: $PinTypeD { _mode: PhantomData },
)+
}
}
}
$(
pin!($PinTypeA, $pin_identA, $pin_noA, group0, dirset0, dirclr0,
pincfg0, outset0, outclr0, pmux0, out0);
)+
$(
pin!($PinTypeB, $pin_identB, $pin_noB, group1, dirset1, dirclr1,
pincfg1, outset1, outclr1, pmux1, out1);
)+
$(
#[cfg(any(feature = "samd51p19a", feature = "samd51n20a", feature = "samd51p20a", feature = "same54"))]
pin!($PinTypeC, $pin_identC, $pin_noC, group2, dirset2, dirclr2,
pincfg2, outset2, outclr2, pmux2, out2);
)+
$(
#[cfg(any(feature = "samd51p19a", feature = "samd51p20a", feature = "same54"))]
pin!($PinTypeD, $pin_identD, $pin_noD, group3, dirset3, dirclr3,
pincfg3, outset3, outclr3, pmux3, out3);
)+
};
}
port!([
Pa0: (pa0, 0),
Pa1: (pa1, 1),
Pa2: (pa2, 2),
Pa3: (pa3, 3),
Pa4: (pa4, 4),
Pa5: (pa5, 5),
Pa6: (pa6, 6),
Pa7: (pa7, 7),
Pa8: (pa8, 8),
Pa9: (pa9, 9),
Pa10: (pa10, 10),
Pa11: (pa11, 11),
Pa12: (pa12, 12),
Pa13: (pa13, 13),
Pa14: (pa14, 14),
Pa15: (pa15, 15),
Pa16: (pa16, 16),
Pa17: (pa17, 17),
Pa18: (pa18, 18),
Pa19: (pa19, 19),
Pa20: (pa20, 20),
Pa21: (pa21, 21),
Pa22: (pa22, 22),
Pa23: (pa23, 23),
Pa24: (pa24, 24),
Pa25: (pa25, 25),
Pa26: (pa26, 26),
Pa27: (pa27, 27),
Pa28: (pa28, 28),
Pa29: (pa29, 29),
Pa30: (pa30, 30),
Pa31: (pa31, 31),
],[
Pb0: (pb0, 0),
Pb1: (pb1, 1),
Pb2: (pb2, 2),
Pb3: (pb3, 3),
Pb4: (pb4, 4),
Pb5: (pb5, 5),
Pb6: (pb6, 6),
Pb7: (pb7, 7),
Pb8: (pb8, 8),
Pb9: (pb9, 9),
Pb10: (pb10, 10),
Pb11: (pb11, 11),
Pb12: (pb12, 12),
Pb13: (pb13, 13),
Pb14: (pb14, 14),
Pb15: (pb15, 15),
Pb16: (pb16, 16),
Pb17: (pb17, 17),
Pb18: (pb18, 18),
Pb19: (pb19, 19),
Pb20: (pb20, 20),
Pb21: (pb21, 21),
Pb22: (pb22, 22),
Pb23: (pb23, 23),
Pb24: (pb24, 24),
Pb25: (pb25, 25),
Pb26: (pb26, 26),
Pb27: (pb27, 27),
Pb28: (pb28, 28),
Pb29: (pb29, 29),
Pb30: (pb30, 30),
Pb31: (pb31, 31),
],
[
Pc0: (pc0, 0),
Pc1: (pc1, 1),
Pc2: (pc2, 2),
Pc3: (pc3, 3),
Pc4: (pc4, 4),
Pc5: (pc5, 5),
Pc6: (pc6, 6),
Pc7: (pc7, 7),
Pc10: (pc10, 10),
Pc11: (pc11, 11),
Pc12: (pc12, 12),
Pc13: (pc13, 13),
Pc14: (pc14, 14),
Pc15: (pc15, 15),
Pc16: (pc16, 16),
Pc17: (pc17, 17),
Pc18: (pc18, 18),
Pc19: (pc19, 19),
Pc20: (pc20, 20),
Pc21: (pc21, 21),
Pc22: (pc22, 22),
Pc23: (pc23, 23),
Pc24: (pc24, 24),
Pc25: (pc25, 25),
Pc26: (pc26, 26),
Pc27: (pc27, 27),
Pc28: (pc28, 28),
Pc30: (pc30, 30),
Pc31: (pc31, 31),
],
[
Pd0: (pd0, 0),
Pd1: (pd1, 1),
Pd8: (pd8, 8),
Pd9: (pd9, 9),
Pd10: (pd10, 10),
Pd11: (pd11, 11),
Pd12: (pd12, 12),
Pd20: (pd20, 20),
Pd21: (pd21, 21),
]);
/// This macro is a helper for defining a `Pins` type in a board support
/// crate. This type is used to provide more meaningful aliases for the
/// various GPIO pins for a given board.
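///
/// A sketch of intended use in a board support crate (the board's pin
/// names and the `target_device` ident below are assumptions):
///
/// ```ignore
/// define_pins!(
///     /// Pin aliases for the hypothetical board
///     struct Pins,
///     target_device: target_device,
///     /// User LED
///     pin led = a17,
///     /// I2C data line
///     pin sda = a12,
/// );
/// let pins = Pins::new(peripherals.PORT);
/// ```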
#[macro_export]
macro_rules! define_pins {
($(#[$topattr:meta])* struct $Type:ident,
target_device: $target_device:ident,
$( $(#[$attr:meta])* pin $name:ident = $pin_ident:ident),+ , ) => {
$crate::paste::item! {
$(#[$topattr])*
pub struct $Type {
/// Opaque port reference
pub port: Port,
$(
$(#[$attr])*
pub $name: gpio::[<P $pin_ident>]<Input<Floating>>
),+
}
}
impl $Type {
/// Returns the pins for the device
$crate::paste::item! {
pub fn new(port: $target_device::PORT) -> Self {
let pins = port.split();
$Type {
port: pins.port,
$(
$name: pins.[<p $pin_ident>]
),+
}
}
}
}
}}
| 30.594816 | 119 | 0.497503 |
e4658989dc49df844b74c2a5270946ba8147d917 | 2,238 | #![crate_name = "uu_pwd"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Derek Chiang <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use std::env;
use std::path::{Path, PathBuf};
use std::io;
static NAME: &str = "pwd";
static VERSION: &str = env!("CARGO_PKG_VERSION");
pub fn absolute_path(path: &Path) -> io::Result<PathBuf> {
let path_buf = path.canonicalize()?;
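    // On Windows, canonicalize() returns an extended-length path carrying
    // the `\\?\` ("verbatim") prefix; strip it so the printed path looks
    // like a conventional one.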
#[cfg(windows)]
let path_buf = Path::new(
path_buf
.as_path()
.to_string_lossy()
.trim_left_matches(r"\\?\"),
).to_path_buf();
Ok(path_buf)
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
opts.optflag(
"L",
"logical",
"use PWD from environment, even if it contains symlinks",
);
opts.optflag("P", "physical", "avoid all symlinks");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => crash!(1, "Invalid options\n{}", f),
};
if matches.opt_present("help") {
let msg = format!(
"{0} {1}
Usage:
{0} [OPTION]...
Print the full filename of the current working directory.",
NAME, VERSION
);
print!("{}", opts.usage(&msg));
} else if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
} else {
match env::current_dir() {
Ok(logical_path) => {
if matches.opt_present("logical") {
println!("{}", logical_path.display());
} else {
match absolute_path(&logical_path) {
Ok(physical_path) => println!("{}", physical_path.display()),
Err(e) => crash!(1, "failed to get absolute path {}", e),
};
}
}
Err(e) => crash!(1, "failed to get current directory {}", e),
};
}
0
}
| 26.023256 | 85 | 0.537087 |
ff5a3966d8aa9e3a0e376d6ebfd06f042d291cb7 | 2,416 | //! Token generation
use std::iter::*;
use crypto::bcrypt::bcrypt;
pub struct TokenGenerator {
/// Salt for bcrypt
salt: Vec<u8>,
/// bcrypt cost factor, defaults to 10
bcrypt_cost: u32,
    /// Length of a token's validity window, in seconds
pub valid_duration_secs: i64,
}
impl TokenGenerator {
pub fn new(valid_duration_secs: i64, salt: Vec<u8>) -> TokenGenerator {
TokenGenerator {
salt: salt,
bcrypt_cost: 10,
valid_duration_secs: valid_duration_secs
}
}
/// Return (from, to, token)
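    ///
    /// Example: with `valid_duration_secs = 7200` and `at_time = 99999999`,
    /// the timeslot is `99999999 - (99999999 % 7200) = 99993600`, so the
    /// token is valid over `[99993600, 100000800)` (matching the unit test).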
pub fn generate_token(&self, username: &str, at_time: i64) -> (i64, i64, String) {
let timeslot = at_time - (at_time % self.valid_duration_secs);
let input = format!("{}{}", username, timeslot);
let token = self.make_hash_token(&input.as_bytes());
return (timeslot, timeslot + self.valid_duration_secs, token)
}
#[inline(always)]
pub fn generate_token_norm(&self, username: &str, at_time: i64) -> (i64, i64, String) {
let (valid_from, valid_to, tok) = self.generate_token(username, at_time);
return (valid_from, valid_to, normalize_token(tok.as_str()));
}
fn make_hash_token(&self, input: &[u8]) -> String {
let mut out = [0u8; 24];
bcrypt(self.bcrypt_cost, &self.salt, input, &mut out);
let fold_func = { |acc, &e| acc ^ e };
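        // XOR-fold three 7-byte windows of the hash into one byte each.
        // Note the `..` ranges are end-exclusive, so bytes 7, 15 and 23
        // never enter the fold; kept as-is so existing tokens stay valid.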
return format!("{:02X}-{:02X}-{:02X}",
out[0..7].into_iter().fold(0xff, &fold_func),
out[8..15].into_iter().fold(0xff, &fold_func),
out[16..23].into_iter().fold(0xff, &fold_func))
}
}
pub fn normalize_token(token: &str) -> String {
token.to_lowercase().chars().filter(|c| c.is_digit(16)).collect::<String>()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_normalize_token() {
assert_eq!(normalize_token(&"7A-74-F4".to_string()), "7a74f4");
}
#[test]
fn test_generate_token() {
use time;
let tg = TokenGenerator::new(time::Duration::hours(2).num_seconds(),
vec!(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16));
let (valid_from, valid_until, result) = tg.generate_token("a", 99999999);
assert_eq!( valid_from, 99993600);
assert_eq!( valid_until, 100000800);
assert_eq!( result, "7A-74-F4");
}
} | 32.648649 | 98 | 0.579884 |
1ef7b1118f3b98ce7747ecf737de8232c055babf | 4,092 | //! Logger that sends logs to local syslog daemon. Unix-like platforms only.
//! Uses the [POSIX syslog API].
//!
//! [POSIX syslog API]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/closelog.html
//!
//! # Concurrency issues
//!
//! POSIX doesn't support opening more than one connection to syslogd at a
//! time. Although it is safe to construct more than one logger using this
//! module at the same time, some of the settings for syslog loggers will be
//! overwritten by the settings for additional syslog loggers created later.
//!
//! For this reason, the following rules should be followed:
//!
//! * Libraries should not use this module or otherwise call
//! `openlog` unless specifically told to do so by the main application.
//! * An application that uses this module should not cause `openlog` to be
//! called from anywhere else.
//! * An application should not use this module to construct more than one
//! `Logger` at the same time, except when constructing a new `Logger` that
//! is to replace an old one (for instance, if the application is reloading
//! its configuration files and reinitializing its logging pipeline).
//!
//! Failure to abide by these rules may result in `closelog` being called at
//! the wrong time. This will cause `openlog` settings (application name,
//! syslog facility, and some flags) to be reset, and there may be a delay in
//! processing the next log message after that (because the connection to the
//! syslog server, if applicable, must be reopened).
// TODO: Some systems (including OpenBSD and Android) have reentrant versions
// of the POSIX syslog functions. These systems *do* support opening multiple
// connections to syslog, and therefore do not suffer from the above
// concurrency issues. Perhaps this crate should use the reentrant syslog API
// on those platforms.
// # Design and rationale
//
// (This section is not part of the documentation for this module. It's only a
// source code comment.)
//
// This module uses the POSIX syslog API to submit log entries to the local
// syslogd. This is unlike the `syslog` crate, which connects to `/dev/log`
// or `/var/run/log` directly. The reasons for this approach, despite the above
// drawbacks, are as follows.
//
// ## Portability
//
// POSIX only specifies the `syslog` function and related functions.
//
// POSIX does not specify that a Unix-domain socket is used for submitting log
// messages to syslogd, nor the socket's path, nor the protocol used on that
// socket. The path of the socket is different on different systems:
//
// * `/dev/log` – original BSD, OpenBSD, Linux
// * `/var/run/log` – FreeBSD and NetBSD (but on Linux with systemd, this
// is a folder)
// * `/var/run/syslog` – Darwin/macOS
//
// The protocol spoken on the socket is not formally specified. It is
// whatever the system's `syslog` function writes to it, which may of course
// vary between systems. It is typically different from IETF RFCs 3164 and
// 5424.
//
// The OpenBSD kernel has a dedicated system call for submitting log messages.
// `/dev/log` is still available, but not preferred.
//
// On macOS, the `syslog` function submits log entries to the Apple System Log
// service. BSD-style log messages are accepted on `/var/run/syslog`, but that
// is not preferred.
//
// ## Reliability
//
// On every platform that has a `syslog` function, it is battle-tested and
// very definitely works.
//
// ## Simplicity
//
// Even in “classic” implementations of the POSIX `syslog` function, there are
// a number of details that it keeps track of:
//
// * Opening the socket
// * Reopening the socket when necessary
// * Formatting log messages for consumption by syslogd
// * Determining the name of the process, when none is specified by the
// application
//
// By calling the POSIX function, we avoid needing to reimplement all this in
// Rust.
#![cfg(unix)]
mod builder;
pub use builder::*;
mod config;
pub use config::*;
mod drain;
use drain::*;
mod facility;
pub use facility::*;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
pub mod format;
| 37.2 | 96 | 0.725562 |
d7de1eb2a9ca927ee0661befa834457cd5e14257 | 2,779 | use crate::chunked_array::RevMapping;
use crate::prelude::*;
use arrow::array::DictionaryArray;
use arrow::compute::cast::cast;
impl From<&CategoricalChunked> for DictionaryArray<u32> {
fn from(ca: &CategoricalChunked) -> Self {
let ca = ca.rechunk();
let keys = ca.downcast_iter().next().unwrap();
let map = &**ca.categorical_map.as_ref().unwrap();
match map {
RevMapping::Local(arr) => {
DictionaryArray::from_data(keys.clone(), Arc::new(arr.clone()))
}
RevMapping::Global(reverse_map, values, _uuid) => {
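                // A global mapping's keys index a process-wide cache, so
                // translate each key through `reverse_map` to an index
                // local to `values` before building the dictionary.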
let iter = keys
.into_iter()
.map(|opt_k| opt_k.map(|k| *reverse_map.get(k).unwrap()));
let keys = PrimitiveArray::from_trusted_len_iter(iter);
DictionaryArray::from_data(keys, Arc::new(values.clone()))
}
}
}
}
impl From<&CategoricalChunked> for DictionaryArray<i64> {
fn from(ca: &CategoricalChunked) -> Self {
let ca = ca.rechunk();
let keys = ca.downcast_iter().next().unwrap();
let map = &**ca.categorical_map.as_ref().unwrap();
match map {
RevMapping::Local(arr) => DictionaryArray::from_data(
cast(keys, &ArrowDataType::Int64)
.unwrap()
.as_any()
.downcast_ref::<PrimitiveArray<i64>>()
.unwrap()
.clone(),
Arc::new(arr.clone()),
),
RevMapping::Global(reverse_map, values, _uuid) => {
let iter = keys
.into_iter()
.map(|opt_k| opt_k.map(|k| *reverse_map.get(k).unwrap() as i64));
let keys = PrimitiveArray::from_trusted_len_iter(iter);
DictionaryArray::from_data(keys, Arc::new(values.clone()))
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::{reset_string_cache, toggle_string_cache, SINGLE_LOCK};
use std::convert::TryFrom;
#[test]
fn test_categorical_round_trip() -> Result<()> {
let _lock = SINGLE_LOCK.lock();
reset_string_cache();
let slice = &[
Some("foo"),
None,
Some("bar"),
Some("foo"),
Some("foo"),
Some("bar"),
];
let ca = Utf8Chunked::new_from_opt_slice("a", slice);
let ca = ca.cast::<CategoricalType>()?;
let arr: DictionaryArray<u32> = (&ca).into();
let s = Series::try_from(("foo", Arc::new(arr) as ArrayRef))?;
assert_eq!(s.dtype(), &DataType::Categorical);
assert_eq!(s.null_count(), 1);
assert_eq!(s.len(), 6);
Ok(())
}
}
| 33.481928 | 85 | 0.517812 |
d57268696ca4c26dc726516ea52689268dba5121 | 2,417 | use crate::extensions::tests::{HashType, SUDT_CODE_HASH};
use crate::utils::to_fixed_array;
use ckb_types::bytes::Bytes;
use ckb_types::core::{BlockNumber, BlockView, TransactionBuilder, TransactionView};
use ckb_types::{packed, prelude::*};
fn create_ckb_script(args: Bytes, hash_type: HashType) -> packed::Script {
packed::Script::new_builder()
.args(args.pack())
.hash_type(hash_type.into())
.build()
}
pub fn create_sudt_script(args: Bytes) -> packed::Script {
let code_hash = hex::decode(SUDT_CODE_HASH).unwrap();
packed::Script::new_builder()
.args(args.pack())
.hash_type(HashType::Data.into())
.code_hash(to_fixed_array(&code_hash).pack())
.build()
}
pub fn create_ckb_cell(lock_args: Bytes, capacity: u64) -> packed::CellOutput {
packed::CellOutput::new_builder()
.lock(create_ckb_script(lock_args, HashType::Data))
.capacity(capacity.pack())
.build()
}
pub fn create_sudt_cell(lock_args: Bytes, sudt_args: Bytes, capacity: u64) -> packed::CellOutput {
packed::CellOutput::new_builder()
.lock(create_ckb_script(lock_args, HashType::Data))
.type_(Some(create_sudt_script(sudt_args)).pack())
.capacity(capacity.pack())
.build()
}
pub fn create_input_cell(
out_point: packed::OutPoint,
block_number: BlockNumber,
) -> packed::CellInput {
packed::CellInput::new(out_point, block_number)
}
pub fn default_data_list(len: usize) -> Vec<packed::Bytes> {
(0..len).map(|_| Default::default()).collect::<Vec<_>>()
}
pub fn default_witness_list(len: usize) -> Vec<packed::Bytes> {
(0..len).map(|_| Default::default()).collect::<Vec<_>>()
}
pub fn create_transaction(
inputs: Vec<packed::CellInput>,
outputs: Vec<packed::CellOutput>,
outputs_data: Vec<packed::Bytes>,
witnesses: Vec<packed::Bytes>,
) -> TransactionView {
TransactionBuilder::default()
.set_inputs(inputs)
.set_outputs(outputs)
.set_witnesses(witnesses)
.outputs_data(outputs_data)
.build()
}
pub fn create_block(
number: BlockNumber,
epoch: u64,
transactions: Vec<TransactionView>,
) -> BlockView {
packed::BlockBuilder::default()
.build()
.into_view()
.as_advanced_builder()
.number(number.pack())
.epoch(epoch.pack())
.transactions(transactions)
.build()
}
| 29.120482 | 98 | 0.65453 |
edaa3d536419fbc9b324501faf2f7a12ac49044b | 21,870 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//
// @generated SignedSource<<03157c2f0dc63be231501d1db300f003>>
//
// To regenerate this file, run:
// hphp/hack/src/oxidized_regen.sh
#![allow(unused_variables)]
use super::node_mut::NodeMut;
use super::type_params::Params;
use crate::{aast::*, aast_defs::*, ast_defs::*, doc_comment::*};
pub fn visit<'node, P: Params>(
v: &mut impl VisitorMut<'node, P = P>,
c: &mut P::Context,
p: &'node mut impl NodeMut<P>,
) -> Result<(), P::Error> {
p.accept(c, v)
}
pub trait VisitorMut<'node> {
type P: Params;
fn object(&mut self) -> &mut dyn VisitorMut<'node, P = Self::P>;
fn visit_ex(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut <Self::P as Params>::Ex,
) -> Result<(), <Self::P as Params>::Error> {
Ok(())
}
fn visit_en(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut <Self::P as Params>::En,
) -> Result<(), <Self::P as Params>::Error> {
Ok(())
}
fn visit_abstraction(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Abstraction,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_afield(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Afield<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_as_expr(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut AsExpr<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_bop(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Bop,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_ca_field(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut CaField<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_ca_type(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut CaType,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_case(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Case<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_catch(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Catch<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_abstract_typeconst(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassAbstractTypeconst,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_attr(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassAttr<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_concrete_typeconst(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassConcreteTypeconst,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_const(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassConst<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_const_kind(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassConstKind<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_get_expr(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassGetExpr<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_id(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassId<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_id_(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassId_<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_typeconst(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassTypeconst,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_typeconst_def(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassTypeconstDef<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_var(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassVar<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_class_(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Class_<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_classish_kind(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ClassishKind,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_collection_targ(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut CollectionTarg<<Self::P as Params>::Ex>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_constraint_kind(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ConstraintKind,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_contexts(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Contexts,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_def(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Def<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_doc_comment(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut DocComment,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_emit_id(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut EmitId,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_enum_(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Enum_,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_env_annot(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut EnvAnnot,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_expr(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Expr<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_expr_(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Expr_<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_expression_tree(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ExpressionTree<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_field(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Field<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_file_attribute(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut FileAttribute<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_fun_def(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut FunDef<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_fun_kind(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut FunKind,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_fun_param(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut FunParam<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_fun_variadicity(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut FunVariadicity<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_fun_(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Fun_<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_func_body(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut FuncBody<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_function_ptr_id(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut FunctionPtrId<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_gconst(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Gconst<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_hf_param_info(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut HfParamInfo,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_hint(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Hint,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_hint_fun(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut HintFun,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_hint_(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Hint_,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_hole_source(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut HoleSource,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_id(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Id,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_import_flavor(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ImportFlavor,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_insteadof_alias(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut InsteadofAlias,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_kvc_kind(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut KvcKind,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_lid(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Lid,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_method_(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Method_<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_nast_shape_info(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut NastShapeInfo,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_ns_kind(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut NsKind,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_og_null_flavor(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut OgNullFlavor,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_param_kind(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ParamKind,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_readonly_kind(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ReadonlyKind,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_record_def(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut RecordDef<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_reify_kind(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ReifyKind,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_shape_field_info(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ShapeFieldInfo,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_shape_field_name(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut ShapeFieldName,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_stmt(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Stmt<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_stmt_(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Stmt_<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_targ(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Targ<<Self::P as Params>::Ex>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_tparam(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Tparam<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_tprim(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Tprim,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_type_hint(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut TypeHint<<Self::P as Params>::Ex>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_typedef(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Typedef<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_typedef_visibility(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut TypedefVisibility,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_uop(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Uop,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_use_as_alias(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut UseAsAlias,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_use_as_visibility(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut UseAsVisibility,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_user_attribute(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut UserAttribute<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_using_stmt(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut UsingStmt<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_variance(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Variance,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_vc_kind(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut VcKind,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_visibility(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut Visibility,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_where_constraint_hint(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut WhereConstraintHint,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_xhp_attr(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut XhpAttr<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_xhp_attr_info(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut XhpAttrInfo,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_xhp_attr_tag(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut XhpAttrTag,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_xhp_attribute(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut XhpAttribute<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_xhp_child(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut XhpChild,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_xhp_child_op(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut XhpChildOp,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_xhp_enum_value(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut XhpEnumValue,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
fn visit_xhp_simple(
&mut self,
c: &mut <Self::P as Params>::Context,
p: &'node mut XhpSimple<<Self::P as Params>::Ex, <Self::P as Params>::En>,
) -> Result<(), <Self::P as Params>::Error> {
p.recurse(c, self.object())
}
}
| 33.697997 | 90 | 0.520073 |
870e4c23aabd0ebff8221efef5475cda39166c98 | 3,161 | use super::Command;
use crate::{
check_db,
cli::Config,
lib::{
entity::{Master, Password},
Cipher, Decrypted, Encrypted, EntryKey, KyEnv, KyError, KyResult, KyTable, Prompt, Qr,
MASTER,
},
};
use clap::Parser;
use tabled::{Alignment, Disable, Full, Indent, Modify, Style, Table, Tabled};
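/// A single two-column table row: a field label and its rendered value.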
#[derive(Tabled)]
struct Tr(&'static str, String);
#[derive(Debug, Parser)]
pub struct Show {
/// Entry which need to be shown
key: EntryKey,
/// Show password in clear text
#[clap(short = 'C', long)]
clear: bool,
/// Show password in a form of qr code
#[clap(short, long)]
qr_code: bool,
/// Don't print the details, can be used with qr code
#[clap(short, long, conflicts_with = "clear")]
mute: bool,
}
impl Command for Show {
fn exec(&self, config: Config) -> KyResult<()> {
let db_path = config.db_path();
check_db!(db_path);
let master_pwd = Master::ask(&Prompt::theme())?;
let env = KyEnv::connect(&db_path)?;
let common_db = env.get_table(KyTable::Common)?;
let pwd_db = env.get_table(KyTable::Password)?;
let rtxn = env.read_txn()?;
let hashed = common_db.get(&rtxn, &Encrypted::from(MASTER))?;
if !master_pwd.verify(hashed.as_ref())? {
return Err(KyError::MisMatch);
}
let master_cipher = Cipher::for_master(&master_pwd);
let enc_key = master_cipher.encrypt(&Decrypted::from(&self.key))?;
// The crypted data returned from database
// Will be in this format password:username:website:expires:notes
let encrypted = pwd_db.get(&rtxn, &enc_key)?;
rtxn.commit()?;
env.close();
let key_master = Cipher::for_key(&master_pwd, &self.key)?;
let val = Password::decrypt(&key_master, &encrypted)?;
        // Each field could be decrypted on its own thread and the results
        // collected with .join(), which would make decryption much faster.
        // I tried and failed; maybe next time.
if self.qr_code {
let code = Qr::new(&val.password)?.render();
eprint!("{}", code);
}
// If the output is muted then no need to print the table
if self.mute {
return Ok(());
}
let decrypted = [
Tr("Username", val.username),
Tr(
"Password",
if self.clear {
val.password
} else {
"*".repeat(15)
},
),
Tr("Website", val.website),
Tr("Expires", val.expires),
Tr("Notes", val.notes),
];
let table = Table::new(&decrypted)
.with(Disable::Row(..1))
.with(Style::pseudo_clean().header(None))
.with(
Modify::new(Full)
.with(Alignment::left())
.with(Indent::new(1, 1, 0, 0)),
);
// Don't println! because last line of table already contains a line feed
print!("{}", table);
Ok(())
}
}
| 27.486957 | 94 | 0.536539 |
29d5240d56855673b1e7c70a03a86273ce04d3b9 | 6,507 | use std::convert::Infallible;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::ops::Add;
use std::sync::Arc;
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use http::StatusCode;
use openidconnect::{
AuthorizationCode, ClientId, ClientSecret, CsrfToken, IssuerUrl, Nonce, OAuth2TokenResponse,
RedirectUrl, RequestTokenError, Scope,
};
use openidconnect::core::{CoreAuthenticationFlow, CoreClient, CoreProviderMetadata};
use serde::Deserialize;
use tokio::sync::{mpsc, RwLock};
use tokio::sync::mpsc::Sender;
use warp::{Filter, Reply};
use crate::auth::AuthProvider;
use crate::context::{Auth, Context};
use crate::error::Error;
#[derive(Debug, Clone)]
pub struct TokenSet {
pub access_token: String,
pub refresh_token: Option<String>,
pub expires_at: DateTime<Utc>,
}
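/// OIDC provider that runs the authorization-code flow, catching the
/// redirect on a local port.
///
/// A usage sketch (issuer URL, client id, scope and port below are
/// placeholder assumptions):
///
/// ```ignore
/// let provider = OidcProvider::new(
///     "https://issuer.example.com", "my-client", None::<String>,
///     "openid profile", 8085,
/// ).await?;
/// let ctx = provider.login(ctx).await?;
/// ```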
#[derive(Clone)]
pub struct OidcProvider {
issuer_url: String,
client_id: String,
client_secret: Option<String>,
scopes: Vec<Scope>,
client: CoreClient,
tokens: Option<TokenSet>,
port: u16,
}
impl OidcProvider {
pub async fn new(
issuer_url: impl ToString,
client_id: impl ToString,
client_secret: Option<impl ToString>,
scope: impl ToString,
port: u16,
) -> Result<Self, Error> {
let metadata = CoreProviderMetadata::discover_async(
IssuerUrl::new(issuer_url.to_string())?,
openidconnect::reqwest::async_http_client,
)
.await
.map_err(|err| Error::Auth(err.into()))?;
let client_secret = client_secret.map(|secret| secret.to_string());
let scopes = scope
.to_string()
.split(' ')
.map(|scope| Scope::new(scope.to_string()))
.collect();
Ok(Self {
issuer_url: issuer_url.to_string(),
client_id: client_id.to_string(),
client: CoreClient::from_provider_metadata(
metadata,
ClientId::new(client_id.to_string()),
client_secret
.as_ref()
.map(|secret| ClientSecret::new(secret.to_string())),
)
.set_redirect_uri(RedirectUrl::new(format!(
"http://localhost:{}/callback",
port
))?),
client_secret,
scopes,
tokens: None,
port,
})
}
}
#[async_trait]
impl AuthProvider for OidcProvider {
async fn login(&self, mut ctx: Context) -> Result<Context, Error> {
// Generate the full authorization URL.
let mut req = self.client.authorize_url(
CoreAuthenticationFlow::AuthorizationCode,
CsrfToken::new_random,
Nonce::new_random,
);
for scope in self.scopes.clone() {
req = req.add_scope(scope);
}
let (auth_url, _csrf_token, _nonce) = req.url();
let this = Arc::new(RwLock::new(self.clone()));
let (tx, mut rx) = mpsc::channel(1);
let app = warp::get()
.and(warp::path("callback"))
.and(with_provider(this.clone()))
.and(with_shutdown_signal(tx))
.and(warp::query::query::<CallbackQuery>())
.and_then(callback_handler);
let (_addr, server) = warp::serve(app).bind_with_graceful_shutdown(
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), self.port),
async move {
rx.recv().await.expect("shutdown::recv");
},
);
open::that(&auth_url.to_string())?;
tracing::info!("The login page has been opened on your default browser. You can also manually visit {}", auth_url);
server.await;
let this = this.read().await;
let tokens = this.tokens.as_ref().unwrap();
ctx.set_auth(Auth::Oidc {
issuer_url: this.issuer_url.clone(),
client_id: this.client_id.clone(),
access_token: tokens.access_token.clone(),
refresh_token: tokens.refresh_token.clone(),
expires_at: tokens.expires_at,
});
Ok(ctx)
}
}
fn with_provider(
provider: Arc<RwLock<OidcProvider>>,
) -> impl Filter<Extract = (Arc<RwLock<OidcProvider>>,), Error = Infallible> + Clone {
warp::any().map(move || provider.clone())
}
fn with_shutdown_signal(
tx: Sender<()>,
) -> impl Filter<Extract = (Sender<()>,), Error = Infallible> + Clone {
warp::any().map(move || tx.clone())
}
async fn callback_handler(
provider: Arc<RwLock<OidcProvider>>,
tx: Sender<()>,
CallbackQuery { code, state: _state }: CallbackQuery,
) -> Result<impl Reply, warp::Rejection> {
let mut provider = provider.write().await;
let token_response = provider
.client
.exchange_code(AuthorizationCode::new(code))
.request_async(openidconnect::reqwest::async_http_client)
.await;
if let Err(err) = token_response {
let msg = match &err {
RequestTokenError::ServerResponse(res) => res.to_string(),
RequestTokenError::Request(inner) => inner.to_string(),
RequestTokenError::Parse(inner, _) => inner.to_string(),
RequestTokenError::Other(_) => "".to_string(),
};
tracing::info!("ERROR: {} {}", err, msg);
tx.send(()).await.expect("shutdown::send");
return Ok(warp::reply::with_status(
warp::reply::html(format!(
r#"
<h1>ERROR</h1>
<h2>{}</h2>
<p>{}</p>
"#,
err, msg
)),
StatusCode::BAD_REQUEST,
));
}
let token_response = token_response.unwrap();
provider.tokens = Some(TokenSet {
access_token: token_response.access_token().secret().clone(),
refresh_token: token_response
.refresh_token()
.map(|token| token.secret().clone()),
expires_at: Utc::now().add(
token_response
.expires_in()
.map(|duration| Duration::from_std(duration).expect("Duration::from_std"))
.unwrap_or_else(|| Duration::seconds(0)),
),
});
tx.send(()).await.expect("shutdown::send");
Ok(warp::reply::with_status(
warp::reply::html(
"<h1>Authentication completed!</h1><p>You can close this window now.</p>".to_string(),
),
StatusCode::OK,
))
}
#[derive(Debug, Deserialize)]
struct CallbackQuery {
code: String,
state: String,
}
| 32.212871 | 123 | 0.58122 |
09ba0cd892b9a6955004de7f7a4e3c56b32eb639 | 41,238 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Module containing functionality to compute array equality.
//! This module uses [ArrayData] and does not
//! depend on dynamic casting of `Array`.
use super::{
Array, ArrayData, BinaryOffsetSizeTrait, BooleanArray, DecimalArray,
FixedSizeBinaryArray, FixedSizeListArray, GenericBinaryArray, GenericListArray,
GenericStringArray, MapArray, NullArray, OffsetSizeTrait, PrimitiveArray,
StringOffsetSizeTrait, StructArray,
};
use crate::{
buffer::Buffer,
datatypes::{ArrowPrimitiveType, DataType, IntervalUnit},
};
use half::f16;
mod boolean;
mod decimal;
mod dictionary;
mod fixed_binary;
mod fixed_list;
mod list;
mod null;
mod primitive;
mod structure;
mod utils;
mod variable_size;
// these methods assume the same type, len and null count.
// For this reason, they are not exposed and are instead used
// to build the generic functions below (`equal_range` and `equal`).
use boolean::boolean_equal;
use decimal::decimal_equal;
use dictionary::dictionary_equal;
use fixed_binary::fixed_binary_equal;
use fixed_list::fixed_list_equal;
use list::list_equal;
use null::null_equal;
use primitive::primitive_equal;
use structure::struct_equal;
use variable_size::variable_sized_equal;
impl PartialEq for dyn Array {
fn eq(&self, other: &Self) -> bool {
equal(self.data(), other.data())
}
}
impl<T: Array> PartialEq<T> for dyn Array {
fn eq(&self, other: &T) -> bool {
equal(self.data(), other.data())
}
}
impl PartialEq for NullArray {
fn eq(&self, other: &NullArray) -> bool {
equal(self.data(), other.data())
}
}
impl<T: ArrowPrimitiveType> PartialEq for PrimitiveArray<T> {
fn eq(&self, other: &PrimitiveArray<T>) -> bool {
equal(self.data(), other.data())
}
}
impl PartialEq for BooleanArray {
fn eq(&self, other: &BooleanArray) -> bool {
equal(self.data(), other.data())
}
}
impl<OffsetSize: StringOffsetSizeTrait> PartialEq for GenericStringArray<OffsetSize> {
fn eq(&self, other: &Self) -> bool {
equal(self.data(), other.data())
}
}
impl<OffsetSize: BinaryOffsetSizeTrait> PartialEq for GenericBinaryArray<OffsetSize> {
fn eq(&self, other: &Self) -> bool {
equal(self.data(), other.data())
}
}
impl PartialEq for FixedSizeBinaryArray {
fn eq(&self, other: &Self) -> bool {
equal(self.data(), other.data())
}
}
impl PartialEq for DecimalArray {
fn eq(&self, other: &Self) -> bool {
equal(self.data(), other.data())
}
}
impl<OffsetSize: OffsetSizeTrait> PartialEq for GenericListArray<OffsetSize> {
fn eq(&self, other: &Self) -> bool {
equal(self.data(), other.data())
}
}
impl PartialEq for MapArray {
fn eq(&self, other: &Self) -> bool {
equal(self.data(), other.data())
}
}
impl PartialEq for FixedSizeListArray {
fn eq(&self, other: &Self) -> bool {
equal(self.data(), other.data())
}
}
impl PartialEq for StructArray {
fn eq(&self, other: &Self) -> bool {
equal(self.data(), other.data())
}
}
/// Compares the values of two [ArrayData] starting at `lhs_start` and `rhs_start` respectively
/// for `len` slots. The null buffers `lhs_nulls` and `rhs_nulls` inherit parent nullability.
///
/// If an array is a child of a struct or list, the array's nulls have to be merged with the parent.
/// This then affects the null count of the array, thus the merged nulls are passed separately
/// as `lhs_nulls` and `rhs_nulls` variables to functions.
/// The nulls are merged with a bitwise AND, and null counts are recomputed where necessary.
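///
/// For instance (a sketch, with 1 = valid and 0 = null): merging a parent
/// bitmap `0b1011` with a child bitmap `0b1101` via AND yields `0b1001`, so
/// only slots 0 and 3 are compared as valid values.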
#[inline]
fn equal_values(
lhs: &ArrayData,
rhs: &ArrayData,
lhs_nulls: Option<&Buffer>,
rhs_nulls: Option<&Buffer>,
lhs_start: usize,
rhs_start: usize,
len: usize,
) -> bool {
match lhs.data_type() {
DataType::Null => null_equal(lhs, rhs, lhs_start, rhs_start, len),
DataType::Boolean => {
boolean_equal(lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len)
}
DataType::UInt8 => primitive_equal::<u8>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::UInt16 => primitive_equal::<u16>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::UInt32 => primitive_equal::<u32>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::UInt64 => primitive_equal::<u64>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Int8 => primitive_equal::<i8>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Int16 => primitive_equal::<i16>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Int32 => primitive_equal::<i32>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Int64 => primitive_equal::<i64>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Float32 => primitive_equal::<f32>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Float64 => primitive_equal::<f64>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Date32
| DataType::Time32(_)
| DataType::Interval(IntervalUnit::YearMonth) => primitive_equal::<i32>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Date64
| DataType::Interval(IntervalUnit::DayTime)
| DataType::Time64(_)
| DataType::Timestamp(_, _)
| DataType::Duration(_) => primitive_equal::<i64>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Interval(IntervalUnit::MonthDayNano) => primitive_equal::<i128>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Utf8 | DataType::Binary => variable_sized_equal::<i32>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::LargeUtf8 | DataType::LargeBinary => variable_sized_equal::<i64>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::FixedSizeBinary(_) => {
fixed_binary_equal(lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len)
}
DataType::Decimal(_, _) => {
decimal_equal(lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len)
}
DataType::List(_) => {
list_equal::<i32>(lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len)
}
DataType::LargeList(_) => {
list_equal::<i64>(lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len)
}
DataType::FixedSizeList(_, _) => {
fixed_list_equal(lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len)
}
DataType::Struct(_) => {
struct_equal(lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len)
}
DataType::Union(_, _) => unimplemented!("See ARROW-8576"),
DataType::Dictionary(data_type, _) => match data_type.as_ref() {
DataType::Int8 => dictionary_equal::<i8>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Int16 => dictionary_equal::<i16>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Int32 => dictionary_equal::<i32>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Int64 => dictionary_equal::<i64>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::UInt8 => dictionary_equal::<u8>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::UInt16 => dictionary_equal::<u16>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::UInt32 => dictionary_equal::<u32>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::UInt64 => dictionary_equal::<u64>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
_ => unreachable!(),
},
DataType::Float16 => primitive_equal::<f16>(
lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len,
),
DataType::Map(_, _) => {
list_equal::<i32>(lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len)
}
}
}
fn equal_range(
lhs: &ArrayData,
rhs: &ArrayData,
lhs_nulls: Option<&Buffer>,
rhs_nulls: Option<&Buffer>,
lhs_start: usize,
rhs_start: usize,
len: usize,
) -> bool {
utils::base_equal(lhs, rhs)
&& utils::equal_nulls(lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len)
&& equal_values(lhs, rhs, lhs_nulls, rhs_nulls, lhs_start, rhs_start, len)
}
/// Logically compares two [ArrayData].
/// Two arrays are logically equal if and only if:
/// * their data types are equal
/// * their lengths are equal
/// * their null counts are equal
/// * their null bitmaps are equal
/// * each of their items is equal
///
/// Two items are equal when their in-memory representation is physically equal (i.e. same bit content).
/// The physical comparison depends on the data type.
/// # Panics
/// This function may panic whenever any of the [ArrayData] does not follow the Arrow specification.
/// (e.g. wrong number of buffers, buffer `len` does not correspond to the declared `len`)
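///
/// # Example
///
/// A minimal illustration (values are illustrative; the `PartialEq` impls in
/// this module delegate to this function):
///
/// ```
/// use arrow::array::Int32Array;
///
/// let a = Int32Array::from(vec![Some(1), None, Some(3)]);
/// let b = Int32Array::from(vec![Some(1), None, Some(3)]);
/// assert_eq!(a, b);
/// ```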
pub fn equal(lhs: &ArrayData, rhs: &ArrayData) -> bool {
let lhs_nulls = lhs.null_buffer();
let rhs_nulls = rhs.null_buffer();
utils::base_equal(lhs, rhs)
&& lhs.null_count() == rhs.null_count()
&& utils::equal_nulls(lhs, rhs, lhs_nulls, rhs_nulls, 0, 0, lhs.len())
&& equal_values(lhs, rhs, lhs_nulls, rhs_nulls, 0, 0, lhs.len())
}
#[cfg(test)]
mod tests {
use std::convert::TryFrom;
use std::sync::Arc;
use crate::array::{
array::Array, ArrayDataBuilder, ArrayRef, BinaryOffsetSizeTrait, BooleanArray,
DecimalBuilder, FixedSizeBinaryBuilder, FixedSizeListBuilder, GenericBinaryArray,
Int32Builder, ListBuilder, NullArray, PrimitiveBuilder, StringArray,
StringDictionaryBuilder, StringOffsetSizeTrait, StructArray,
};
use crate::array::{GenericStringArray, Int32Array};
use crate::buffer::Buffer;
use crate::datatypes::{Field, Int16Type, ToByteSlice};
use super::*;
#[test]
fn test_null_equal() {
let a = NullArray::new(12);
let a = a.data();
let b = NullArray::new(12);
let b = b.data();
test_equal(a, b, true);
let b = NullArray::new(10);
let b = b.data();
test_equal(a, b, false);
// Test the case where offset != 0
let a_slice = a.slice(2, 3);
let b_slice = b.slice(1, 3);
test_equal(&a_slice, &b_slice, true);
let a_slice = a.slice(5, 4);
let b_slice = b.slice(3, 3);
test_equal(&a_slice, &b_slice, false);
}
#[test]
fn test_boolean_equal() {
let a = BooleanArray::from(vec![false, false, true]);
let a = a.data();
let b = BooleanArray::from(vec![false, false, true]);
let b = b.data();
test_equal(a, b, true);
let b = BooleanArray::from(vec![false, false, false]);
let b = b.data();
test_equal(a, b, false);
}
#[test]
fn test_boolean_equal_nulls() {
let a = BooleanArray::from(vec![Some(false), None, None, Some(true)]);
let a = a.data();
let b = BooleanArray::from(vec![Some(false), None, None, Some(true)]);
let b = b.data();
test_equal(a, b, true);
let b = BooleanArray::from(vec![None, None, None, Some(true)]);
let b = b.data();
test_equal(a, b, false);
let b = BooleanArray::from(vec![Some(true), None, None, Some(true)]);
let b = b.data();
test_equal(a, b, false);
}
#[test]
fn test_boolean_equal_offset() {
let a = BooleanArray::from(vec![false, true, false, true, false, false, true]);
let a = a.data();
let b =
BooleanArray::from(vec![true, false, false, false, true, false, true, true]);
let b = b.data();
assert!(!equal(a, b));
assert!(!equal(b, a));
let a_slice = a.slice(2, 3);
let b_slice = b.slice(3, 3);
assert!(equal(&a_slice, &b_slice));
assert!(equal(&b_slice, &a_slice));
let a_slice = a.slice(3, 4);
let b_slice = b.slice(4, 4);
assert!(!equal(&a_slice, &b_slice));
assert!(!equal(&b_slice, &a_slice));
// Test the optimization cases where null_count == 0 and starts at 0 and len >= size_of(u8)
// Elements fill in `u8`'s exactly.
let mut vector = vec![false, false, true, true, true, true, true, true];
let a = BooleanArray::from(vector.clone());
let a = a.data();
let b = BooleanArray::from(vector.clone());
let b = b.data();
test_equal(a, b, true);
// Elements fill in `u8`s + suffix bits.
vector.push(true);
let a = BooleanArray::from(vector.clone());
let a = a.data();
let b = BooleanArray::from(vector);
let b = b.data();
test_equal(a, b, true);
}
#[test]
fn test_primitive() {
let cases = vec![
(
vec![Some(1), Some(2), Some(3)],
vec![Some(1), Some(2), Some(3)],
true,
),
(
vec![Some(1), Some(2), Some(3)],
vec![Some(1), Some(2), Some(4)],
false,
),
(
vec![Some(1), Some(2), None],
vec![Some(1), Some(2), None],
true,
),
(
vec![Some(1), None, Some(3)],
vec![Some(1), Some(2), None],
false,
),
(
vec![Some(1), None, None],
vec![Some(1), Some(2), None],
false,
),
];
for (lhs, rhs, expected) in cases {
let lhs = Int32Array::from(lhs);
let lhs = lhs.data();
let rhs = Int32Array::from(rhs);
let rhs = rhs.data();
test_equal(lhs, rhs, expected);
}
}
#[test]
fn test_primitive_slice() {
let cases = vec![
(
vec![Some(1), Some(2), Some(3)],
(0, 1),
vec![Some(1), Some(2), Some(3)],
(0, 1),
true,
),
(
vec![Some(1), Some(2), Some(3)],
(1, 1),
vec![Some(1), Some(2), Some(3)],
(2, 1),
false,
),
(
vec![Some(1), Some(2), None],
(1, 1),
vec![Some(1), None, Some(2)],
(2, 1),
true,
),
(
vec![None, Some(2), None],
(1, 1),
vec![None, None, Some(2)],
(2, 1),
true,
),
(
vec![Some(1), None, Some(2), None, Some(3)],
(2, 2),
vec![None, Some(2), None, Some(3)],
(1, 2),
true,
),
];
for (lhs, slice_lhs, rhs, slice_rhs, expected) in cases {
let lhs = Int32Array::from(lhs);
let lhs = lhs.data();
let lhs = lhs.slice(slice_lhs.0, slice_lhs.1);
let rhs = Int32Array::from(rhs);
let rhs = rhs.data();
let rhs = rhs.slice(slice_rhs.0, slice_rhs.1);
test_equal(&lhs, &rhs, expected);
}
}
fn test_equal(lhs: &ArrayData, rhs: &ArrayData, expected: bool) {
// equality is symmetric
assert!(equal(lhs, lhs), "\n{:?}\n{:?}", lhs, lhs);
assert!(equal(rhs, rhs), "\n{:?}\n{:?}", rhs, rhs);
assert_eq!(equal(lhs, rhs), expected, "\n{:?}\n{:?}", lhs, rhs);
assert_eq!(equal(rhs, lhs), expected, "\n{:?}\n{:?}", rhs, lhs);
}
fn binary_cases() -> Vec<(Vec<Option<String>>, Vec<Option<String>>, bool)> {
let base = vec![
Some("hello".to_owned()),
None,
None,
Some("world".to_owned()),
None,
None,
];
let not_base = vec![
Some("hello".to_owned()),
Some("foo".to_owned()),
None,
Some("world".to_owned()),
None,
None,
];
vec![
(
vec![Some("hello".to_owned()), Some("world".to_owned())],
vec![Some("hello".to_owned()), Some("world".to_owned())],
true,
),
(
vec![Some("hello".to_owned()), Some("world".to_owned())],
vec![Some("hello".to_owned()), Some("arrow".to_owned())],
false,
),
(base.clone(), base.clone(), true),
(base, not_base, false),
]
}
fn test_generic_string_equal<OffsetSize: StringOffsetSizeTrait>() {
let cases = binary_cases();
for (lhs, rhs, expected) in cases {
let lhs: GenericStringArray<OffsetSize> = lhs.into_iter().collect();
let lhs = lhs.data();
let rhs: GenericStringArray<OffsetSize> = rhs.into_iter().collect();
let rhs = rhs.data();
test_equal(lhs, rhs, expected);
}
}
#[test]
fn test_string_equal() {
test_generic_string_equal::<i32>()
}
#[test]
fn test_large_string_equal() {
test_generic_string_equal::<i64>()
}
fn test_generic_binary_equal<OffsetSize: BinaryOffsetSizeTrait>() {
let cases = binary_cases();
for (lhs, rhs, expected) in cases {
let lhs = lhs
.iter()
.map(|x| x.as_deref().map(|x| x.as_bytes()))
.collect();
let rhs = rhs
.iter()
.map(|x| x.as_deref().map(|x| x.as_bytes()))
.collect();
let lhs = GenericBinaryArray::<OffsetSize>::from_opt_vec(lhs);
let lhs = lhs.data();
let rhs = GenericBinaryArray::<OffsetSize>::from_opt_vec(rhs);
let rhs = rhs.data();
test_equal(lhs, rhs, expected);
}
}
#[test]
fn test_binary_equal() {
test_generic_binary_equal::<i32>()
}
#[test]
fn test_large_binary_equal() {
test_generic_binary_equal::<i64>()
}
#[test]
fn test_string_offset() {
let a = StringArray::from(vec![Some("a"), None, Some("b")]);
let a = a.data();
let a = a.slice(2, 1);
let b = StringArray::from(vec![Some("b")]);
let b = b.data();
test_equal(&a, b, true);
}
#[test]
fn test_string_offset_larger() {
let a = StringArray::from(vec![Some("a"), None, Some("b"), None, Some("c")]);
let a = a.data();
let b = StringArray::from(vec![None, Some("b"), None, Some("c")]);
let b = b.data();
test_equal(&a.slice(2, 2), &b.slice(0, 2), false);
test_equal(&a.slice(2, 2), &b.slice(1, 2), true);
test_equal(&a.slice(2, 2), &b.slice(2, 2), false);
}
#[test]
fn test_null() {
let a = NullArray::new(2);
let a = a.data();
let b = NullArray::new(2);
let b = b.data();
test_equal(a, b, true);
let b = NullArray::new(1);
let b = b.data();
test_equal(a, b, false);
}
fn create_list_array<U: AsRef<[i32]>, T: AsRef<[Option<U>]>>(data: T) -> ArrayData {
let mut builder = ListBuilder::new(Int32Builder::new(10));
for d in data.as_ref() {
if let Some(v) = d {
builder.values().append_slice(v.as_ref()).unwrap();
builder.append(true).unwrap()
} else {
builder.append(false).unwrap()
}
}
builder.finish().data().clone()
}
#[test]
fn test_list_equal() {
let a = create_list_array(&[Some(&[1, 2, 3]), Some(&[4, 5, 6])]);
let b = create_list_array(&[Some(&[1, 2, 3]), Some(&[4, 5, 6])]);
test_equal(&a, &b, true);
let b = create_list_array(&[Some(&[1, 2, 3]), Some(&[4, 5, 7])]);
test_equal(&a, &b, false);
}
// Test the case where null_count > 0
#[test]
fn test_list_null() {
let a =
create_list_array(&[Some(&[1, 2]), None, None, Some(&[3, 4]), None, None]);
let b =
create_list_array(&[Some(&[1, 2]), None, None, Some(&[3, 4]), None, None]);
test_equal(&a, &b, true);
let b = create_list_array(&[
Some(&[1, 2]),
None,
Some(&[5, 6]),
Some(&[3, 4]),
None,
None,
]);
test_equal(&a, &b, false);
let b =
create_list_array(&[Some(&[1, 2]), None, None, Some(&[3, 5]), None, None]);
test_equal(&a, &b, false);
// a list where the nullness of values is determined by the list's bitmap
let c_values = Int32Array::from(vec![1, 2, -1, -2, 3, 4, -3, -4]);
let c = ArrayDataBuilder::new(DataType::List(Box::new(Field::new(
"item",
DataType::Int32,
true,
))))
.len(6)
.add_buffer(Buffer::from(vec![0i32, 2, 3, 4, 6, 7, 8].to_byte_slice()))
.add_child_data(c_values.data().clone())
.null_bit_buffer(Buffer::from(vec![0b00001001]))
.build()
.unwrap();
let d_values = Int32Array::from(vec![
Some(1),
Some(2),
None,
None,
Some(3),
Some(4),
None,
None,
]);
let d = ArrayDataBuilder::new(DataType::List(Box::new(Field::new(
"item",
DataType::Int32,
true,
))))
.len(6)
.add_buffer(Buffer::from(vec![0i32, 2, 3, 4, 6, 7, 8].to_byte_slice()))
.add_child_data(d_values.data().clone())
.null_bit_buffer(Buffer::from(vec![0b00001001]))
.build()
.unwrap();
test_equal(&c, &d, true);
}
// Test the case where offset != 0
#[test]
fn test_list_offsets() {
let a =
create_list_array(&[Some(&[1, 2]), None, None, Some(&[3, 4]), None, None]);
let b =
create_list_array(&[Some(&[1, 2]), None, None, Some(&[3, 5]), None, None]);
let a_slice = a.slice(0, 3);
let b_slice = b.slice(0, 3);
test_equal(&a_slice, &b_slice, true);
let a_slice = a.slice(0, 5);
let b_slice = b.slice(0, 5);
test_equal(&a_slice, &b_slice, false);
let a_slice = a.slice(4, 1);
let b_slice = b.slice(4, 1);
test_equal(&a_slice, &b_slice, true);
}
fn create_fixed_size_binary_array<U: AsRef<[u8]>, T: AsRef<[Option<U>]>>(
data: T,
) -> ArrayData {
let mut builder = FixedSizeBinaryBuilder::new(15, 5);
for d in data.as_ref() {
if let Some(v) = d {
builder.append_value(v.as_ref()).unwrap();
} else {
builder.append_null().unwrap();
}
}
builder.finish().data().clone()
}
#[test]
fn test_fixed_size_binary_equal() {
let a = create_fixed_size_binary_array(&[Some(b"hello"), Some(b"world")]);
let b = create_fixed_size_binary_array(&[Some(b"hello"), Some(b"world")]);
test_equal(&a, &b, true);
let b = create_fixed_size_binary_array(&[Some(b"hello"), Some(b"arrow")]);
test_equal(&a, &b, false);
}
// Test the case where null_count > 0
#[test]
fn test_fixed_size_binary_null() {
let a = create_fixed_size_binary_array(&[Some(b"hello"), None, Some(b"world")]);
let b = create_fixed_size_binary_array(&[Some(b"hello"), None, Some(b"world")]);
test_equal(&a, &b, true);
let b = create_fixed_size_binary_array(&[Some(b"hello"), Some(b"world"), None]);
test_equal(&a, &b, false);
let b = create_fixed_size_binary_array(&[Some(b"hello"), None, Some(b"arrow")]);
test_equal(&a, &b, false);
}
#[test]
fn test_fixed_size_binary_offsets() {
// Test the case where offset != 0
let a = create_fixed_size_binary_array(&[
Some(b"hello"),
None,
None,
Some(b"world"),
None,
None,
]);
let b = create_fixed_size_binary_array(&[
Some(b"hello"),
None,
None,
Some(b"arrow"),
None,
None,
]);
let a_slice = a.slice(0, 3);
let b_slice = b.slice(0, 3);
test_equal(&a_slice, &b_slice, true);
let a_slice = a.slice(0, 5);
let b_slice = b.slice(0, 5);
test_equal(&a_slice, &b_slice, false);
let a_slice = a.slice(4, 1);
let b_slice = b.slice(4, 1);
test_equal(&a_slice, &b_slice, true);
let a_slice = a.slice(3, 1);
let b_slice = b.slice(3, 1);
test_equal(&a_slice, &b_slice, false);
}
fn create_decimal_array(data: &[Option<i128>]) -> ArrayData {
let mut builder = DecimalBuilder::new(20, 23, 6);
for d in data {
if let Some(v) = d {
builder.append_value(*v).unwrap();
} else {
builder.append_null().unwrap();
}
}
builder.finish().data().clone()
}
#[test]
fn test_decimal_equal() {
let a = create_decimal_array(&[Some(8_887_000_000), Some(-8_887_000_000)]);
let b = create_decimal_array(&[Some(8_887_000_000), Some(-8_887_000_000)]);
test_equal(&a, &b, true);
let b = create_decimal_array(&[Some(15_887_000_000), Some(-8_887_000_000)]);
test_equal(&a, &b, false);
}
// Test the case where null_count > 0
#[test]
fn test_decimal_null() {
let a = create_decimal_array(&[Some(8_887_000_000), None, Some(-8_887_000_000)]);
let b = create_decimal_array(&[Some(8_887_000_000), None, Some(-8_887_000_000)]);
test_equal(&a, &b, true);
let b = create_decimal_array(&[Some(8_887_000_000), Some(-8_887_000_000), None]);
test_equal(&a, &b, false);
let b = create_decimal_array(&[Some(15_887_000_000), None, Some(-8_887_000_000)]);
test_equal(&a, &b, false);
}
#[test]
fn test_decimal_offsets() {
// Test the case where offset != 0
let a = create_decimal_array(&[
Some(8_887_000_000),
None,
None,
Some(-8_887_000_000),
None,
None,
]);
let b = create_decimal_array(&[
None,
Some(8_887_000_000),
None,
None,
Some(15_887_000_000),
None,
None,
]);
let a_slice = a.slice(0, 3);
let b_slice = b.slice(1, 3);
test_equal(&a_slice, &b_slice, true);
let a_slice = a.slice(0, 5);
let b_slice = b.slice(1, 5);
test_equal(&a_slice, &b_slice, false);
let a_slice = a.slice(4, 1);
let b_slice = b.slice(5, 1);
test_equal(&a_slice, &b_slice, true);
let a_slice = a.slice(3, 3);
let b_slice = b.slice(4, 3);
test_equal(&a_slice, &b_slice, false);
let a_slice = a.slice(1, 3);
let b_slice = b.slice(2, 3);
test_equal(&a_slice, &b_slice, false);
let b = create_decimal_array(&[
None,
None,
None,
Some(-8_887_000_000),
Some(-3_000),
None,
]);
let a_slice = a.slice(1, 3);
let b_slice = b.slice(1, 3);
test_equal(&a_slice, &b_slice, true);
}
    /// Create a fixed size list array whose lists each hold 3 values
fn create_fixed_size_list_array<U: AsRef<[i32]>, T: AsRef<[Option<U>]>>(
data: T,
) -> ArrayData {
let mut builder = FixedSizeListBuilder::new(Int32Builder::new(10), 3);
for d in data.as_ref() {
if let Some(v) = d {
builder.values().append_slice(v.as_ref()).unwrap();
builder.append(true).unwrap()
} else {
for _ in 0..builder.value_length() {
builder.values().append_null().unwrap();
}
builder.append(false).unwrap()
}
}
builder.finish().data().clone()
}
#[test]
fn test_fixed_size_list_equal() {
let a = create_fixed_size_list_array(&[Some(&[1, 2, 3]), Some(&[4, 5, 6])]);
let b = create_fixed_size_list_array(&[Some(&[1, 2, 3]), Some(&[4, 5, 6])]);
test_equal(&a, &b, true);
let b = create_fixed_size_list_array(&[Some(&[1, 2, 3]), Some(&[4, 5, 7])]);
test_equal(&a, &b, false);
}
// Test the case where null_count > 0
#[test]
fn test_fixed_list_null() {
let a = create_fixed_size_list_array(&[
Some(&[1, 2, 3]),
None,
None,
Some(&[4, 5, 6]),
None,
None,
]);
let b = create_fixed_size_list_array(&[
Some(&[1, 2, 3]),
None,
None,
Some(&[4, 5, 6]),
None,
None,
]);
test_equal(&a, &b, true);
let b = create_fixed_size_list_array(&[
Some(&[1, 2, 3]),
None,
Some(&[7, 8, 9]),
Some(&[4, 5, 6]),
None,
None,
]);
test_equal(&a, &b, false);
let b = create_fixed_size_list_array(&[
Some(&[1, 2, 3]),
None,
None,
Some(&[3, 6, 9]),
None,
None,
]);
test_equal(&a, &b, false);
}
#[test]
fn test_fixed_list_offsets() {
// Test the case where offset != 0
let a = create_fixed_size_list_array(&[
Some(&[1, 2, 3]),
None,
None,
Some(&[4, 5, 6]),
None,
None,
]);
let b = create_fixed_size_list_array(&[
Some(&[1, 2, 3]),
None,
None,
Some(&[3, 6, 9]),
None,
None,
]);
let a_slice = a.slice(0, 3);
let b_slice = b.slice(0, 3);
test_equal(&a_slice, &b_slice, true);
let a_slice = a.slice(0, 5);
let b_slice = b.slice(0, 5);
test_equal(&a_slice, &b_slice, false);
let a_slice = a.slice(4, 1);
let b_slice = b.slice(4, 1);
test_equal(&a_slice, &b_slice, true);
}
#[test]
fn test_struct_equal() {
let strings: ArrayRef = Arc::new(StringArray::from(vec![
Some("joe"),
None,
None,
Some("mark"),
Some("doe"),
]));
let ints: ArrayRef = Arc::new(Int32Array::from(vec![
Some(1),
Some(2),
None,
Some(4),
Some(5),
]));
let a =
StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())])
.unwrap();
let a = a.data();
let b = StructArray::try_from(vec![("f1", strings), ("f2", ints)]).unwrap();
let b = b.data();
test_equal(a, b, true);
}
#[test]
fn test_struct_equal_null() {
let strings: ArrayRef = Arc::new(StringArray::from(vec![
Some("joe"),
None,
None,
Some("mark"),
Some("doe"),
]));
let ints: ArrayRef = Arc::new(Int32Array::from(vec![
Some(1),
Some(2),
None,
Some(4),
Some(5),
]));
let ints_non_null: ArrayRef = Arc::new(Int32Array::from(vec![1, 2, 3, 4, 0]));
let a = ArrayData::builder(DataType::Struct(vec![
Field::new("f1", DataType::Utf8, true),
Field::new("f2", DataType::Int32, true),
]))
.null_bit_buffer(Buffer::from(vec![0b00001011]))
.len(5)
.add_child_data(strings.data_ref().clone())
.add_child_data(ints.data_ref().clone())
.build()
.unwrap();
let a = crate::array::make_array(a);
let b = ArrayData::builder(DataType::Struct(vec![
Field::new("f1", DataType::Utf8, true),
Field::new("f2", DataType::Int32, true),
]))
.null_bit_buffer(Buffer::from(vec![0b00001011]))
.len(5)
.add_child_data(strings.data_ref().clone())
.add_child_data(ints_non_null.data_ref().clone())
.build()
.unwrap();
let b = crate::array::make_array(b);
test_equal(a.data_ref(), b.data_ref(), true);
// test with arrays that are not equal
let c_ints_non_null: ArrayRef = Arc::new(Int32Array::from(vec![1, 2, 3, 0, 4]));
let c = ArrayData::builder(DataType::Struct(vec![
Field::new("f1", DataType::Utf8, true),
Field::new("f2", DataType::Int32, true),
]))
.null_bit_buffer(Buffer::from(vec![0b00001011]))
.len(5)
.add_child_data(strings.data_ref().clone())
.add_child_data(c_ints_non_null.data_ref().clone())
.build()
.unwrap();
let c = crate::array::make_array(c);
test_equal(a.data_ref(), c.data_ref(), false);
// test a nested struct
let a = ArrayData::builder(DataType::Struct(vec![Field::new(
"f3",
a.data_type().clone(),
true,
)]))
.null_bit_buffer(Buffer::from(vec![0b00011110]))
.len(5)
.add_child_data(a.data_ref().clone())
.build()
.unwrap();
let a = crate::array::make_array(a);
// reconstruct b, but with different data where the first struct is null
let strings: ArrayRef = Arc::new(StringArray::from(vec![
Some("joanne"), // difference
None,
None,
Some("mark"),
Some("doe"),
]));
let b = ArrayData::builder(DataType::Struct(vec![
Field::new("f1", DataType::Utf8, true),
Field::new("f2", DataType::Int32, true),
]))
.null_bit_buffer(Buffer::from(vec![0b00001011]))
.len(5)
.add_child_data(strings.data_ref().clone())
.add_child_data(ints_non_null.data_ref().clone())
.build()
.unwrap();
let b = ArrayData::builder(DataType::Struct(vec![Field::new(
"f3",
b.data_type().clone(),
true,
)]))
.null_bit_buffer(Buffer::from(vec![0b00011110]))
.len(5)
.add_child_data(b)
.build()
.unwrap();
let b = crate::array::make_array(b);
test_equal(a.data_ref(), b.data_ref(), true);
}
#[test]
fn test_struct_equal_null_variable_size() {
// the string arrays differ, but where the struct array is null
let strings1: ArrayRef = Arc::new(StringArray::from(vec![
Some("joe"),
None,
None,
Some("mark"),
Some("doel"),
]));
let strings2: ArrayRef = Arc::new(StringArray::from(vec![
Some("joel"),
None,
None,
Some("mark"),
Some("doe"),
]));
let a = ArrayData::builder(DataType::Struct(vec![Field::new(
"f1",
DataType::Utf8,
true,
)]))
.null_bit_buffer(Buffer::from(vec![0b00001010]))
.len(5)
.add_child_data(strings1.data_ref().clone())
.build()
.unwrap();
let a = crate::array::make_array(a);
let b = ArrayData::builder(DataType::Struct(vec![Field::new(
"f1",
DataType::Utf8,
true,
)]))
.null_bit_buffer(Buffer::from(vec![0b00001010]))
.len(5)
.add_child_data(strings2.data_ref().clone())
.build()
.unwrap();
let b = crate::array::make_array(b);
test_equal(a.data_ref(), b.data_ref(), true);
// test with arrays that are not equal
let strings3: ArrayRef = Arc::new(StringArray::from(vec![
Some("mark"),
None,
None,
Some("doe"),
Some("joe"),
]));
let c = ArrayData::builder(DataType::Struct(vec![Field::new(
"f1",
DataType::Utf8,
true,
)]))
.null_bit_buffer(Buffer::from(vec![0b00001011]))
.len(5)
.add_child_data(strings3.data_ref().clone())
.build()
.unwrap();
let c = crate::array::make_array(c);
test_equal(a.data_ref(), c.data_ref(), false);
}
fn create_dictionary_array(values: &[&str], keys: &[Option<&str>]) -> ArrayData {
let values = StringArray::from(values.to_vec());
let mut builder = StringDictionaryBuilder::new_with_dictionary(
PrimitiveBuilder::<Int16Type>::new(3),
&values,
)
.unwrap();
for key in keys {
if let Some(v) = key {
builder.append(v).unwrap();
} else {
builder.append_null().unwrap()
}
}
builder.finish().data().clone()
}
#[test]
fn test_dictionary_equal() {
// (a, b, c), (1, 2, 1, 3) => (a, b, a, c)
let a = create_dictionary_array(
&["a", "b", "c"],
&[Some("a"), Some("b"), Some("a"), Some("c")],
);
// different representation (values and keys are swapped), same result
let b = create_dictionary_array(
&["a", "c", "b"],
&[Some("a"), Some("b"), Some("a"), Some("c")],
);
test_equal(&a, &b, true);
// different len
let b =
create_dictionary_array(&["a", "c", "b"], &[Some("a"), Some("b"), Some("a")]);
test_equal(&a, &b, false);
// different key
let b = create_dictionary_array(
&["a", "c", "b"],
&[Some("a"), Some("b"), Some("a"), Some("a")],
);
test_equal(&a, &b, false);
// different values, same keys
let b = create_dictionary_array(
&["a", "b", "d"],
&[Some("a"), Some("b"), Some("a"), Some("d")],
);
test_equal(&a, &b, false);
}
#[test]
fn test_dictionary_equal_null() {
// (a, b, c), (1, 2, 1, 3) => (a, b, a, c)
let a = create_dictionary_array(
&["a", "b", "c"],
&[Some("a"), None, Some("a"), Some("c")],
);
// equal to self
test_equal(&a, &a, true);
// different representation (values and keys are swapped), same result
let b = create_dictionary_array(
&["a", "c", "b"],
&[Some("a"), None, Some("a"), Some("c")],
);
test_equal(&a, &b, true);
// different null position
let b = create_dictionary_array(
&["a", "c", "b"],
&[Some("a"), Some("b"), Some("a"), None],
);
test_equal(&a, &b, false);
// different key
let b = create_dictionary_array(
&["a", "c", "b"],
&[Some("a"), None, Some("a"), Some("a")],
);
test_equal(&a, &b, false);
// different values, same keys
let b = create_dictionary_array(
&["a", "b", "d"],
&[Some("a"), None, Some("a"), Some("d")],
);
test_equal(&a, &b, false);
}
}
| 31.697156 | 104 | 0.518866 |
fe19dadbe0243113ffde82585b78e34123115ef8 | 1,426 | // compile-flags: --document-private-items
#![feature(decl_macro)]
// @has decl_macro/macro.my_macro.html //pre 'pub macro my_macro() {'
// @has - //pre '...'
// @has - //pre '}'
pub macro my_macro() {
}
// @has decl_macro/macro.my_macro_2.html //pre 'pub macro my_macro_2($($tok : tt) *) {'
// @has - //pre '...'
// @has - //pre '}'
pub macro my_macro_2($($tok:tt)*) {
}
// @has decl_macro/macro.my_macro_multi.html //pre 'pub macro my_macro_multi {'
// @has - //pre '(_) => { ... },'
// @has - //pre '($foo : ident.$bar : expr) => { ... },'
// @has - //pre '($($foo : literal), +) => { ... },'
// @has - //pre '}'
pub macro my_macro_multi {
(_) => {
},
($foo:ident . $bar:expr) => {
},
($($foo:literal),+) => {
}
}
// @has decl_macro/macro.by_example_single.html //pre 'pub macro by_example_single($foo : expr) {'
// @has - //pre '...'
// @has - //pre '}'
pub macro by_example_single {
($foo:expr) => {}
}
mod a {
mod b {
// @has decl_macro/a/b/macro.by_example_vis.html //pre 'pub(super) macro by_example_vis($foo : expr) {'
pub(in super) macro by_example_vis {
($foo:expr) => {}
}
mod c {
// @has decl_macro/a/b/c/macro.by_example_vis_named.html //pre 'pub(in a) macro by_example_vis_named($foo : expr) {'
pub(in a) macro by_example_vis_named {
($foo:expr) => {}
}
}
}
}
| 25.017544 | 128 | 0.521038 |
7573c7d0594dcfbd2af0d40e3675d726a07793da | 7,704 | use bench::Bench;
use bytes::Bytes;
use futures::channel;
use p2p::{
async_trait,
builder::{MetaBuilder, ServiceBuilder},
context::{ProtocolContext, ProtocolContextMutRef},
multiaddr::Multiaddr,
secio::SecioKeyPair,
service::{
ProtocolHandle, ProtocolMeta, Service, ServiceControl, TargetProtocol, TargetSession,
},
traits::{ServiceHandle, ServiceProtocol},
ProtocolId,
};
use std::{sync::Once, thread};
use tokio_util::codec::length_delimited::Builder;
static START_SECIO: Once = Once::new();
static START_NO_SECIO: Once = Once::new();
static mut SECIO_CONTROL: Option<ServiceControl> = None;
static mut NO_SECIO_CONTROL: Option<ServiceControl> = None;
static mut SECIO_RECV: Option<crossbeam_channel::Receiver<Notify>> = None;
static mut NO_SECIO_RECV: Option<crossbeam_channel::Receiver<Notify>> = None;
#[derive(Debug, PartialEq)]
enum Notify {
Connected,
Message(bytes::Bytes),
}
pub fn create<F>(secio: bool, meta: ProtocolMeta, shandle: F) -> Service<F>
where
F: ServiceHandle + Unpin,
{
let builder = ServiceBuilder::default()
.insert_protocol(meta)
.forever(true);
if secio {
builder
.key_pair(SecioKeyPair::secp256k1_generated())
.build(shandle)
} else {
builder.build(shandle)
}
}
struct PHandle {
connected_count: usize,
sender: crossbeam_channel::Sender<Notify>,
}
#[async_trait]
impl ServiceProtocol for PHandle {
async fn init(&mut self, _control: &mut ProtocolContext) {}
async fn connected(&mut self, _control: ProtocolContextMutRef<'_>, _version: &str) {
self.connected_count += 1;
let _res = self.sender.send(Notify::Connected);
}
async fn disconnected(&mut self, _control: ProtocolContextMutRef<'_>) {
self.connected_count -= 1;
}
async fn received(&mut self, _env: ProtocolContextMutRef<'_>, data: bytes::Bytes) {
let _res = self.sender.send(Notify::Message(data));
}
}
fn create_meta(id: ProtocolId) -> (ProtocolMeta, crossbeam_channel::Receiver<Notify>) {
let (sender, receiver) = crossbeam_channel::bounded(1);
let meta = MetaBuilder::new()
.id(id)
.codec(|| {
Box::new(
Builder::new()
.max_frame_length(1024 * 1024 * 20)
.new_codec(),
)
})
.service_handle(move || {
if id == ProtocolId::default() {
ProtocolHandle::None
} else {
let handle = Box::new(PHandle {
connected_count: 0,
sender,
});
ProtocolHandle::Callback(handle)
}
})
.build();
(meta, receiver)
}
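// Note: the 20 MiB `max_frame_length` configured above leaves headroom for the
// 10 MiB payloads benchmarked in `main`.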
pub fn init() {
    // initialize two peers connected over secio
START_SECIO.call_once(|| {
let (meta, _receiver) = create_meta(ProtocolId::new(1));
let (addr_sender, addr_receiver) = channel::oneshot::channel::<Multiaddr>();
let mut service = create(true, meta, ());
let control = service.control().clone();
thread::spawn(move || {
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async move {
let listen_addr = service
.listen("/ip4/127.0.0.1/tcp/0".parse().unwrap())
.await
.unwrap();
let _res = addr_sender.send(listen_addr);
service.run().await
});
});
let (meta, client_receiver) = create_meta(1.into());
thread::spawn(|| {
let rt = tokio::runtime::Runtime::new().unwrap();
let mut service = create(true, meta, ());
rt.block_on(async move {
let listen_addr = addr_receiver.await.unwrap();
service
.dial(listen_addr, TargetProtocol::All)
.await
.unwrap();
service.run().await
});
});
assert_eq!(client_receiver.recv(), Ok(Notify::Connected));
unsafe {
SECIO_CONTROL = Some(control.into());
SECIO_RECV = Some(client_receiver);
}
});
    // initialize two peers without secio
START_NO_SECIO.call_once(|| {
let (meta, _receiver) = create_meta(ProtocolId::new(1));
let (addr_sender, addr_receiver) = channel::oneshot::channel::<Multiaddr>();
let mut service = create(false, meta, ());
let control = service.control().clone();
thread::spawn(move || {
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async move {
let listen_addr = service
.listen("/ip4/127.0.0.1/tcp/0".parse().unwrap())
.await
.unwrap();
let _res = addr_sender.send(listen_addr);
service.run().await
});
});
let (meta, client_receiver) = create_meta(ProtocolId::new(1));
thread::spawn(move || {
let rt = tokio::runtime::Runtime::new().unwrap();
let mut service = create(false, meta, ());
rt.block_on(async move {
let listen_addr = addr_receiver.await.unwrap();
service
.dial(listen_addr, TargetProtocol::All)
.await
.unwrap();
service.run().await
});
});
assert_eq!(client_receiver.recv(), Ok(Notify::Connected));
unsafe {
NO_SECIO_CONTROL = Some(control.into());
NO_SECIO_RECV = Some(client_receiver);
}
});
}
fn secio_and_send_data(data: &[u8]) {
unsafe {
SECIO_CONTROL.as_mut().map(|control| {
control.filter_broadcast(
TargetSession::All,
ProtocolId::new(1),
Bytes::from(data.to_owned()),
)
});
if let Some(rev) = SECIO_RECV.as_ref() {
assert_eq!(
rev.recv(),
Ok(Notify::Message(bytes::Bytes::from(data.to_owned())))
)
}
}
}
fn no_secio_and_send_data(data: &[u8]) {
unsafe {
NO_SECIO_CONTROL.as_mut().map(|control| {
control.filter_broadcast(TargetSession::All, 1.into(), Bytes::from(data.to_owned()))
});
if let Some(rev) = NO_SECIO_RECV.as_ref() {
assert_eq!(
rev.recv(),
Ok(Notify::Message(bytes::Bytes::from(data.to_owned())))
)
}
}
}
fn main() {
init();
let cycles = std::env::args()
.nth(1)
.and_then(|number| number.parse().ok())
.unwrap_or(100);
let check_point = std::env::args()
.nth(2)
.and_then(|number| number.parse().ok())
.unwrap_or(10);
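    // Hypothetical invocation (the binary name depends on how this bench is
    // built; values are illustrative): `<bench-binary> 200 20` runs 200 cycles
    // per case with an estimated check point of 20.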
let mut bench = Bench::default().cycles(cycles).estimated_point(check_point);
let mb = (0..1024 * 1024 * 10)
.map(|_| rand::random::<u8>())
.collect::<Vec<_>>();
let kb = (0..1024 * 10)
.map(|_| rand::random::<u8>())
.collect::<Vec<_>>();
bench.bench_function_with_init("10kb_benchmark_with_secio", &kb, move |data| {
secio_and_send_data(&data)
});
bench.bench_function_with_init("10kb_benchmark_with_no_secio", &kb, move |data| {
no_secio_and_send_data(&data)
});
bench.bench_function_with_init("10mb_benchmark_with_secio", &mb, move |data| {
secio_and_send_data(&data)
});
bench.bench_function_with_init("10mb_benchmark_with_no_secio", &mb, move |data| {
no_secio_and_send_data(&data)
});
}
| 30.571429 | 96 | 0.549844 |
2392ad4d7a12ee76a4c3dd80c4a7c7200a762f86 | 6,296 | //! lint on blocks unnecessarily using >= with a + 1 or - 1
use rustc_ast::ast::{BinOpKind, Expr, ExprKind, Lit, LitKind};
use rustc_errors::Applicability;
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use crate::utils::{snippet_opt, span_lint_and_then};
declare_clippy_lint! {
/// **What it does:** Checks for usage of `x >= y + 1` or `x - 1 >= y` (and `<=`) in a block
    ///
/// **Why is this bad?** Readability -- better to use `> y` instead of `>= y + 1`.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// # let x = 1;
/// # let y = 1;
/// if x >= y + 1 {}
/// ```
///
/// Could be written as:
///
/// ```rust
/// # let x = 1;
/// # let y = 1;
/// if x > y {}
/// ```
pub INT_PLUS_ONE,
complexity,
"instead of using `x >= y + 1`, use `x > y`"
}
declare_lint_pass!(IntPlusOne => [INT_PLUS_ONE]);
// cases:
// BinOpKind::Ge
// x >= y + 1
// x - 1 >= y
//
// BinOpKind::Le
// x + 1 <= y
// x <= y - 1
#[derive(Copy, Clone)]
enum Side {
LHS,
RHS,
}
impl IntPlusOne {
#[allow(clippy::cast_sign_loss)]
fn check_lit(lit: &Lit, target_value: i128) -> bool {
if let LitKind::Int(value, ..) = lit.kind {
return value == (target_value as u128);
}
false
}
fn check_binop(cx: &EarlyContext<'_>, binop: BinOpKind, lhs: &Expr, rhs: &Expr) -> Option<String> {
match (binop, &lhs.kind, &rhs.kind) {
// case where `x - 1 >= ...` or `-1 + x >= ...`
(BinOpKind::Ge, &ExprKind::Binary(ref lhskind, ref lhslhs, ref lhsrhs), _) => {
match (lhskind.node, &lhslhs.kind, &lhsrhs.kind) {
// `-1 + x`
(BinOpKind::Add, &ExprKind::Lit(ref lit), _) if Self::check_lit(lit, -1) => {
Self::generate_recommendation(cx, binop, lhsrhs, rhs, Side::LHS)
},
// `x - 1`
(BinOpKind::Sub, _, &ExprKind::Lit(ref lit)) if Self::check_lit(lit, 1) => {
Self::generate_recommendation(cx, binop, lhslhs, rhs, Side::LHS)
},
_ => None,
}
},
// case where `... >= y + 1` or `... >= 1 + y`
(BinOpKind::Ge, _, &ExprKind::Binary(ref rhskind, ref rhslhs, ref rhsrhs))
if rhskind.node == BinOpKind::Add =>
{
match (&rhslhs.kind, &rhsrhs.kind) {
// `y + 1` and `1 + y`
(&ExprKind::Lit(ref lit), _) if Self::check_lit(lit, 1) => {
Self::generate_recommendation(cx, binop, rhsrhs, lhs, Side::RHS)
},
(_, &ExprKind::Lit(ref lit)) if Self::check_lit(lit, 1) => {
Self::generate_recommendation(cx, binop, rhslhs, lhs, Side::RHS)
},
_ => None,
}
}
// case where `x + 1 <= ...` or `1 + x <= ...`
(BinOpKind::Le, &ExprKind::Binary(ref lhskind, ref lhslhs, ref lhsrhs), _)
if lhskind.node == BinOpKind::Add =>
{
match (&lhslhs.kind, &lhsrhs.kind) {
// `1 + x` and `x + 1`
(&ExprKind::Lit(ref lit), _) if Self::check_lit(lit, 1) => {
Self::generate_recommendation(cx, binop, lhsrhs, rhs, Side::LHS)
},
(_, &ExprKind::Lit(ref lit)) if Self::check_lit(lit, 1) => {
Self::generate_recommendation(cx, binop, lhslhs, rhs, Side::LHS)
},
_ => None,
}
}
// case where `... >= y - 1` or `... >= -1 + y`
(BinOpKind::Le, _, &ExprKind::Binary(ref rhskind, ref rhslhs, ref rhsrhs)) => {
match (rhskind.node, &rhslhs.kind, &rhsrhs.kind) {
// `-1 + y`
(BinOpKind::Add, &ExprKind::Lit(ref lit), _) if Self::check_lit(lit, -1) => {
Self::generate_recommendation(cx, binop, rhsrhs, lhs, Side::RHS)
},
// `y - 1`
(BinOpKind::Sub, _, &ExprKind::Lit(ref lit)) if Self::check_lit(lit, 1) => {
Self::generate_recommendation(cx, binop, rhslhs, lhs, Side::RHS)
},
_ => None,
}
},
_ => None,
}
}
fn generate_recommendation(
cx: &EarlyContext<'_>,
binop: BinOpKind,
node: &Expr,
other_side: &Expr,
side: Side,
) -> Option<String> {
let binop_string = match binop {
BinOpKind::Ge => ">",
BinOpKind::Le => "<",
_ => return None,
};
if let Some(snippet) = snippet_opt(cx, node.span) {
if let Some(other_side_snippet) = snippet_opt(cx, other_side.span) {
let rec = match side {
Side::LHS => Some(format!("{} {} {}", snippet, binop_string, other_side_snippet)),
Side::RHS => Some(format!("{} {} {}", other_side_snippet, binop_string, snippet)),
};
return rec;
}
}
None
}
fn emit_warning(cx: &EarlyContext<'_>, block: &Expr, recommendation: String) {
span_lint_and_then(
cx,
INT_PLUS_ONE,
block.span,
"Unnecessary `>= y + 1` or `x - 1 >=`",
|diag| {
diag.span_suggestion(
block.span,
"change it to",
recommendation,
Applicability::MachineApplicable, // snippet
);
},
);
}
}
impl EarlyLintPass for IntPlusOne {
fn check_expr(&mut self, cx: &EarlyContext<'_>, item: &Expr) {
if let ExprKind::Binary(ref kind, ref lhs, ref rhs) = item.kind {
if let Some(ref rec) = Self::check_binop(cx, kind.node, lhs, rhs) {
Self::emit_warning(cx, item, rec.clone());
}
}
}
}
| 35.370787 | 103 | 0.450445 |
48f030512f5ce2824f4877a47f5c3c5e04a02fd6 | 8,173 | /// Mocks for the liquidation-pools pallet.
use super::*;
use crate as liquidation_pools;
use frame_support::{ord_parameter_types, parameter_types, PalletId};
use frame_system::EnsureSignedBy;
use minterest_primitives::Price;
pub use minterest_primitives::{currency::CurrencyType::WrappedToken, Balance, CurrencyId, Rate};
use orml_traits::parameter_type_with_key;
use pallet_traits::PricesManager;
use sp_core::H256;
use sp_io::TestExternalities;
use sp_runtime::testing::TestXt;
use sp_runtime::{
testing::Header,
traits::{BlakeTwo256, IdentityLookup},
FixedPointNumber,
};
use sp_std::cell::RefCell;
use std::collections::HashMap;
pub use test_helper::*;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
type Block = frame_system::mocking::MockBlock<Test>;
// Configure a mock runtime to test the pallet.
frame_support::construct_runtime!(
pub enum Test where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
		// ORML pallets
Tokens: orml_tokens::{Pallet, Storage, Call, Event<T>, Config<T>},
Currencies: orml_currencies::{Pallet, Call, Event<T>},
// Minterest pallets
TestLiquidationPools: liquidation_pools::{Pallet, Storage, Call, Event<T>, ValidateUnsigned},
TestLiquidityPools: liquidity_pools::{Pallet, Storage, Call, Config<T>},
TestDex: dex::{Pallet, Storage, Call, Event<T>},
Controller: controller::{Pallet, Storage, Call, Event, Config<T>},
MntToken: mnt_token::{Pallet, Storage, Call, Event<T>, Config<T>},
MinterestModel: minterest_model::{Pallet, Storage, Call, Event, Config<T>},
}
);
parameter_types! {
pub const MntTokenPalletId: PalletId = PalletId(*b"min/mntt");
pub MntTokenAccountId: AccountId = MntTokenPalletId::get().into_account();
}
mock_impl_system_config!(Test);
mock_impl_liquidity_pools_config!(Test);
mock_impl_orml_tokens_config!(Test);
mock_impl_orml_currencies_config!(Test);
mock_impl_dex_config!(Test);
mock_impl_balances_config!(Test);
mock_impl_controller_config!(Test, ZeroAdmin);
mock_impl_mnt_token_config!(Test, ZeroAdmin);
mock_impl_minterest_model_config!(Test, ZeroAdmin);
parameter_types! {
pub const LiquidityPoolsPalletId: PalletId = PalletId(*b"lqdy/min");
pub LiquidityPoolAccountId: AccountId = LiquidityPoolsPalletId::get().into_account();
pub InitialExchangeRate: Rate = Rate::one();
pub EnabledUnderlyingAssetsIds: Vec<CurrencyId> = CurrencyId::get_enabled_tokens_in_protocol(UnderlyingAsset);
pub EnabledWrappedTokensId: Vec<CurrencyId> = CurrencyId::get_enabled_tokens_in_protocol(WrappedToken);
}
thread_local! {
static UNDERLYING_PRICE: RefCell<HashMap<CurrencyId, Price>> = RefCell::new(
[
(DOT, Price::one()),
(ETH, Price::one()),
(BTC, Price::one()),
(KSM, Price::one()),
]
.iter()
.cloned()
.collect());
}
pub struct MockPriceSource;
impl MockPriceSource {
pub fn set_underlying_price(currency_id: CurrencyId, price: Price) {
UNDERLYING_PRICE.with(|v| v.borrow_mut().insert(currency_id, price));
}
}
impl PricesManager<CurrencyId> for MockPriceSource {
fn get_underlying_price(currency_id: CurrencyId) -> Option<Price> {
UNDERLYING_PRICE.with(|v| v.borrow().get(¤cy_id).copied())
}
fn lock_price(_currency_id: CurrencyId) {}
fn unlock_price(_currency_id: CurrencyId) {}
}
parameter_types! {
pub const LiquidationPoolsPalletId: PalletId = PalletId(*b"lqdn/min");
pub LiquidationPoolAccountId: AccountId = LiquidationPoolsPalletId::get().into_account();
pub const LiquidityPoolsPriority: TransactionPriority = TransactionPriority::max_value();
}
ord_parameter_types! {
pub const ZeroAdmin: AccountId = 0;
}
impl Config for Test {
type Event = Event;
type MultiCurrency = orml_tokens::Pallet<Test>;
type UnsignedPriority = LiquidityPoolsPriority;
type PriceSource = MockPriceSource;
type LiquidationPoolsPalletId = LiquidationPoolsPalletId;
type LiquidationPoolAccountId = LiquidationPoolAccountId;
type UpdateOrigin = EnsureSignedBy<ZeroAdmin, AccountId>;
type LiquidityPoolsManager = liquidity_pools::Pallet<Test>;
type Dex = dex::Pallet<Test>;
type LiquidationPoolsWeightInfo = ();
type ControllerManager = Controller;
}
/// An extrinsic type used for tests.
pub type Extrinsic = TestXt<Call, ()>;
impl<LocalCall> SendTransactionTypes<LocalCall> for Test
where
Call: From<LocalCall>,
{
type OverarchingCall = Call;
type Extrinsic = Extrinsic;
}
pub fn admin() -> Origin {
Origin::signed(ADMIN)
}
pub struct ExternalityBuilder {
endowed_accounts: Vec<(AccountId, CurrencyId, Balance)>,
liquidity_pools: Vec<(CurrencyId, PoolData)>,
liquidation_pools: Vec<(CurrencyId, LiquidationPoolData)>,
}
impl Default for ExternalityBuilder {
fn default() -> Self {
Self {
endowed_accounts: vec![],
liquidity_pools: vec![
(
DOT,
PoolData {
borrowed: Balance::zero(),
borrow_index: Rate::one(),
protocol_interest: Balance::zero(),
},
),
(
ETH,
PoolData {
borrowed: Balance::zero(),
borrow_index: Rate::one(),
protocol_interest: Balance::zero(),
},
),
(
BTC,
PoolData {
borrowed: Balance::zero(),
borrow_index: Rate::one(),
protocol_interest: Balance::zero(),
},
),
],
liquidation_pools: vec![
(
DOT,
LiquidationPoolData {
deviation_threshold: Rate::saturating_from_rational(1, 10),
balance_ratio: Rate::saturating_from_rational(2, 10),
max_ideal_balance_usd: None,
},
),
(
ETH,
LiquidationPoolData {
deviation_threshold: Rate::saturating_from_rational(1, 10),
balance_ratio: Rate::saturating_from_rational(2, 10),
max_ideal_balance_usd: None,
},
),
(
BTC,
LiquidationPoolData {
deviation_threshold: Rate::saturating_from_rational(1, 10),
balance_ratio: Rate::saturating_from_rational(2, 10),
max_ideal_balance_usd: None,
},
),
(
KSM,
LiquidationPoolData {
deviation_threshold: Rate::saturating_from_rational(1, 10),
balance_ratio: Rate::saturating_from_rational(2, 10),
max_ideal_balance_usd: None,
},
),
],
}
}
}
impl ExternalityBuilder {
pub fn user_balance(mut self, user: AccountId, currency_id: CurrencyId, balance: Balance) -> Self {
self.endowed_accounts.push((user, currency_id, balance));
self
}
pub fn set_pool_borrow_underlying(mut self, currency_id: CurrencyId, balance: Balance) -> Self {
self.liquidity_pools.push((
currency_id,
PoolData {
borrowed: balance,
borrow_index: Rate::one(),
protocol_interest: Balance::zero(),
},
));
self
}
pub fn liquidation_pool_balance(mut self, currency_id: CurrencyId, balance: Balance) -> Self {
self.endowed_accounts
.push((TestLiquidationPools::pools_account_id(), currency_id, balance));
self
}
pub fn dex_balance(mut self, currency_id: CurrencyId, balance: Balance) -> Self {
self.endowed_accounts
.push((TestDex::dex_account_id(), currency_id, balance));
self
}
pub fn build(self) -> TestExternalities {
let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
orml_tokens::GenesisConfig::<Test> {
balances: self.endowed_accounts,
}
.assimilate_storage(&mut t)
.unwrap();
liquidity_pools::GenesisConfig::<Test> {
pools: self.liquidity_pools,
pool_user_data: vec![],
}
.assimilate_storage(&mut t)
.unwrap();
liquidation_pools::GenesisConfig::<Test> {
liquidation_pools: self.liquidation_pools,
phantom: PhantomData,
}
.assimilate_storage(&mut t)
.unwrap();
let mut ext = TestExternalities::new(t);
ext.execute_with(|| System::set_block_number(1));
ext
}
}
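
// A hedged usage sketch of the builder (balances are illustrative only):
#[allow(dead_code)]
fn externality_builder_sketch() -> TestExternalities {
	ExternalityBuilder::default()
		.liquidation_pool_balance(DOT, 1_000)
		.dex_balance(ETH, 1_000)
		.build()
}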
pub(crate) fn set_prices_for_assets(prices: Vec<(CurrencyId, Price)>) {
prices.into_iter().for_each(|(currency_id, price)| {
MockPriceSource::set_underlying_price(currency_id, price);
});
}
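// e.g. (illustrative): `set_prices_for_assets(vec![(DOT, Price::saturating_from_integer(2))])`
// pins the DOT oracle price at 2 for the remainder of the test.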
pub(crate) fn liquidation_pool_balance(pool_id: CurrencyId) -> Balance {
Currencies::free_balance(pool_id, &TestLiquidationPools::pools_account_id())
}
| 29.085409 | 111 | 0.723724 |
1aa71fce7639f7b33a26daade80924c6961a5813 | 4,091 | use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::path::Path;
use itertools::Itertools;
use tokio::{fs, io::AsyncWriteExt};
use crate::bump::PackageBump;
use crate::changeset::Changeset;
use crate::semantic::Semantic;
use crate::version::{Version, VersionMod, Versioned};
fn capitalize(s: &str) -> String {
let mut c = s.chars();
match c.next() {
None => String::new(),
Some(f) => f.to_uppercase().chain(c).collect(),
}
}
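
/// A quick sketch of `capitalize` (illustrative values only).
#[cfg(test)]
mod capitalize_sketch {
  use super::capitalize;

  #[test]
  fn capitalizes_first_letter() {
    assert_eq!(capitalize("major"), "Major");
    assert_eq!(capitalize(""), "");
  }
}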
fn fill_output<V: AsChangelogFmt + Versioned + Ord>(
next_version: &Version<V>,
patches: &HashMap<VersionMod<V>, Vec<String>>,
) -> String {
let mut output = String::new();
output.push_str(&next_version.as_changelog_fmt());
for (version, changes) in patches.iter().sorted_by(|(a, _), (b, _)| Ord::cmp(&b, &a)) {
output.push('\n');
output.push_str(&version.as_changelog_fmt());
output.push('\n');
output.push_str(&changes.join("\n"));
}
output
}
fn create_patches<V>(
package_name: &str,
changesets: Vec<&Changeset<V>>,
) -> HashMap<VersionMod<V>, Vec<String>>
where
V: AsChangelogFmt + Clone + Hash + Ord + Versioned,
{
let mut patches: HashMap<VersionMod<V>, Vec<String>> = HashMap::new();
for changset in changesets {
let changeset_summary = changset.as_changelog_fmt();
if let Some(version) = changset.packages.get(package_name) {
if let Some(changes) = patches.get_mut(version) {
changes.push(changeset_summary);
} else {
patches.insert(version.clone(), vec![changeset_summary]);
}
}
}
patches
}
pub struct Changelog;
impl Changelog {
pub async fn update_changelog<T, V>(
changelog_path: T,
next_version: Version<V>,
package_bump: &PackageBump<'_, V>,
dry_run: bool,
) -> std::io::Result<()>
where
T: AsRef<Path> + Debug,
V: AsChangelogFmt + Clone + Hash + Ord + Versioned,
{
let package_name = package_bump.name();
if let Some(patches) = package_bump
.changesets()
.map(|changesets| create_patches(package_name, changesets))
{
if dry_run {
println!(
"dry_run - update changelog {:?}\n{}",
changelog_path,
fill_output(&next_version, &patches)
.split('\n')
.map(|val| format!("dry_run: + {}", val))
.join("\n")
);
} else {
let changelog = fs::read_to_string(&changelog_path)
.await
.unwrap_or_else(|_| format!("# {}\n", package_name));
let mut changelog_lines = changelog.split('\n');
if let Some(title) = changelog_lines.next() {
let mut output = String::new();
output.push_str(title);
output.push('\n');
output.push('\n');
output.push_str(&fill_output(&next_version, &patches));
let mut changelog = fs::File::create(&changelog_path).await?;
// write_all avoids silently dropping bytes on a short write, which
// AsyncWriteExt::write would otherwise permit.
changelog.write_all(output.as_bytes()).await?;
changelog
.write_all(changelog_lines.join("\n").as_bytes())
.await?;
}
}
}
Ok(())
}
}
pub trait AsChangelogFmt: Sized {
fn as_changelog_fmt(&self) -> String;
}
impl<T> AsChangelogFmt for Changeset<T> {
fn as_changelog_fmt(&self) -> String {
let mut changeset_summary = String::new();
let mut parts = self.message.split('\n');
if let Some(value) = parts.next() {
changeset_summary.push_str("- ");
changeset_summary.push_str(value);
changeset_summary.push('\n');
for part in parts {
changeset_summary.push_str(" ");
changeset_summary.push_str(part);
changeset_summary.push('\n');
}
}
changeset_summary
}
}
impl AsChangelogFmt for Semantic {
fn as_changelog_fmt(&self) -> String {
capitalize(&self.to_string())
}
}
impl<T: AsChangelogFmt> AsChangelogFmt for VersionMod<T> {
fn as_changelog_fmt(&self) -> String {
format!("### {} Changes\n", self.version.as_changelog_fmt())
}
}
impl<T> AsChangelogFmt for Version<T> {
fn as_changelog_fmt(&self) -> String {
format!("## {}\n", self.value)
}
}
| 24.497006 | 89 | 0.610853 |
ffbd5aede5845462510c77e0b1850a08e45a2d7a | 16,922 | #![allow(dead_code)]
v_escape::new!(MyEscape, "60->foo");
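// The pair syntax maps a byte to its replacement: 60 is b'<', so `MyEscape`
// rewrites every `<` in its input to the literal string "foo" (exercised by
// `test_escape` below).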
macro_rules! test {
($name:ident, $escapes:expr, $escaped:expr) => {
use std::borrow::Cow;
use std::char::from_u32;
fn all_utf8_less(less: &str) -> String {
assert_eq!(less.len(), less.as_bytes().len());
let less = less.as_bytes();
let mut buf = String::with_capacity(204_672 - less.len());
for i in 0..0x80u8 {
if !less.contains(&i) {
buf.push(from_u32(i as u32).unwrap())
}
}
for i in 0x80..0xD800 {
buf.push(from_u32(i).unwrap());
}
for i in 0xE000..0x11000 {
buf.push(from_u32(i).unwrap());
}
buf
}
let empty = "";
let escapes = $escapes;
let escaped = $escaped;
let utf8: &str = &all_utf8_less($escapes);
let empty_heap = String::new();
let short = "foobar";
let string_long: &str = &short.repeat(1024);
let string = $escapes.to_string();
let cow = Cow::Owned($escapes.to_string());
let mut buf = String::with_capacity(escaped.len());
for c in escapes.chars() {
use std::fmt::Write;
write!(buf, "{}", escape_char(c)).unwrap();
}
assert_eq!(buf, escaped);
for c in utf8.chars() {
assert_eq!(escape_char(c).to_string(), c.to_string());
}
assert_eq!($name::from(empty).to_string(), empty);
assert_eq!($name::from(escapes).to_string(), escaped);
assert_eq!(escape(&empty_heap).to_string(), empty);
assert_eq!(escape(&cow).to_string(), escaped);
assert_eq!(escape(&string).to_string(), escaped);
assert_eq!(escape(&utf8).to_string(), utf8);
assert_eq!($name::from(string_long).to_string(), string_long);
assert_eq!(
$name::from(escapes.repeat(1024).as_ref()).to_string(),
escaped.repeat(1024)
);
assert_eq!(
$name::from([short, escapes, short].join("").as_ref()).to_string(),
[short, escaped, short].join("")
);
assert_eq!(
$name::from([escapes, short].join("").as_ref()).to_string(),
[escaped, short].join("")
);
assert_eq!(
$name::from(["f", escapes, short].join("").as_ref()).to_string(),
["f", escaped, short].join("")
);
assert_eq!(
$name::from(["f", escapes].join("").as_ref()).to_string(),
["f", escaped].join("")
);
assert_eq!(
$name::from(["fo", escapes].join("").as_ref()).to_string(),
["fo", escaped].join("")
);
assert_eq!(
$name::from(["fo", escapes, "b"].join("").as_ref()).to_string(),
["fo", escaped, "b"].join("")
);
assert_eq!(
$name::from(escapes.repeat(2).as_ref()).to_string(),
escaped.repeat(2)
);
assert_eq!(
$name::from(escapes.repeat(3).as_ref()).to_string(),
escaped.repeat(3)
);
assert_eq!(
$name::from(["f", &escapes.repeat(2)].join("").as_ref()).to_string(),
["f", &escaped.repeat(2)].join("")
);
assert_eq!(
$name::from(["fo", &escapes.repeat(2)].join("").as_ref()).to_string(),
["fo", &escaped.repeat(2)].join("")
);
assert_eq!(
$name::from(["fo", &escapes.repeat(2), "bar"].join("").as_ref()).to_string(),
["fo", &escaped.repeat(2), "bar"].join("")
);
assert_eq!(
$name::from(["fo", &escapes.repeat(3), "bar"].join("").as_ref()).to_string(),
["fo", &escaped.repeat(3), "bar"].join("")
);
assert_eq!(
$name::from([&escapes.repeat(3), "bar"].join("").as_ref()).to_string(),
[&escaped.repeat(3), "bar"].join("")
);
assert_eq!(
$name::from([short, &escapes.repeat(3), "bar"].join("").as_ref()).to_string(),
[short, &escaped.repeat(3), "bar"].join("")
);
assert_eq!(
$name::from([short, &escapes.repeat(5), "bar"].join("").as_ref()).to_string(),
[short, &escaped.repeat(5), "bar"].join("")
);
assert_eq!(
$name::from(
[string_long, &escapes.repeat(13)]
.join("")
.repeat(1024)
.as_ref()
)
.to_string(),
[string_long, &escaped.repeat(13)].join("").repeat(1024)
);
assert_eq!(
$name::from([utf8, escapes, short].join("").as_ref()).to_string(),
[utf8, escaped, short].join("")
);
assert_eq!(
$name::from([utf8, escapes, utf8].join("").as_ref()).to_string(),
[utf8, escaped, utf8].join("")
);
assert_eq!(
$name::from([&utf8.repeat(124), escapes, utf8].join("").as_ref()).to_string(),
[&utf8.repeat(124), escaped, utf8].join("")
);
assert_eq!(
$name::from(
[escapes, &utf8.repeat(124), escapes, utf8]
.join("")
.as_ref()
)
.to_string(),
[escaped, &utf8.repeat(124), escaped, utf8].join("")
);
assert_eq!(
$name::from(
[escapes, &utf8.repeat(124), escapes, utf8, escapes]
.join("")
.as_ref()
)
.to_string(),
[escaped, &utf8.repeat(124), escaped, utf8, escaped].join("")
);
};
}
// Reinterpret the first `$l` bytes of a `[MaybeUninit<u8>; N]` buffer as a
// `&[u8]`. Safety: callers must only ask for a prefix that was actually
// initialized by a preceding escape call.
macro_rules! maybe_init {
($b:ident, $l:expr) => {
unsafe { from_raw_parts(&$b as *const _ as *const u8, $l) }
};
}
macro_rules! test_ptr {
($escapes:expr, $escaped:expr) => {{
use std::mem::MaybeUninit;
use std::slice::from_raw_parts;
let empty = "";
let escapes = $escapes;
let escaped = $escaped;
let short = "foobar";
let long = "foobar".repeat(100);
let mix = long.clone() + escapes + short + &long;
let mix_escaped = long.clone() + escaped + short + &long;
let mix_2 = long.repeat(3) + &escapes.repeat(3) + short + &escapes.repeat(2) + &long;
let mix_escaped_2 =
long.repeat(3) + &escaped.repeat(3) + short + &escaped.repeat(2) + &long;
let mut buf = [MaybeUninit::uninit(); 2048];
assert_eq!(f_escape(empty.as_bytes(), &mut buf), Some(empty.len()));
assert_eq!(f_escape(short.as_bytes(), &mut buf), Some(short.len()));
assert_eq!(maybe_init!(buf, short.len()), short.as_bytes());
let mut buf = [MaybeUninit::uninit(); 2048];
assert_eq!(f_escape(long.as_bytes(), &mut buf), Some(long.len()));
assert_eq!(maybe_init!(buf, long.len()), long.as_bytes());
let mut buf = [MaybeUninit::uninit(); 2048];
assert_eq!(f_escape(escapes.as_bytes(), &mut buf), Some(escaped.len()));
assert_eq!(maybe_init!(buf, escaped.len()), escaped.as_bytes());
let mut buf = [MaybeUninit::uninit(); 2048];
assert_eq!(f_escape(mix.as_bytes(), &mut buf), Some(mix_escaped.len()));
assert_eq!(maybe_init!(buf, mix_escaped.len()), mix_escaped.as_bytes());
let mut buf = [MaybeUninit::uninit(); 10240];
assert_eq!(
f_escape(mix_2.as_bytes(), &mut buf),
Some(mix_escaped_2.len())
);
assert_eq!(
maybe_init!(buf, mix_escaped_2.len()),
mix_escaped_2.as_bytes()
);
let mut buf = [MaybeUninit::uninit(); 2048];
let mut cur = 0;
for c in escapes.chars() {
if let Some(i) = f_escape_char(c, &mut buf[cur..]) {
cur += i;
} else {
panic!("overflow");
}
}
assert_eq!(maybe_init!(buf, escaped.len()), escaped.as_bytes());
let mut buf = [MaybeUninit::uninit(); 0];
assert_eq!(f_escape(empty.as_bytes(), &mut buf), Some(empty.len()));
assert_eq!(f_escape(short.as_bytes(), &mut buf), None);
let mut buf = [MaybeUninit::uninit(); 599];
assert_eq!(f_escape(long.as_bytes(), &mut buf), None);
let mut buf = [MaybeUninit::uninit(); 600];
assert_eq!(f_escape(long.as_bytes(), &mut buf), Some(long.len()));
assert_eq!(maybe_init!(buf, long.len()), long.as_bytes());
}};
}
#[test]
fn test_escape() {
test!(MyEscape, "<", "foo");
test_ptr!("<", "foo")
}
mod bytes_buff {
v_escape::new!(MyE, "65->a || 60->b || 61->c || 66->d || 80->e || 81->f");
#[test]
fn test_escape() {
use bytes::BytesMut;
let empty = "";
let escapes = "<=ABPQ";
let escaped = "bcadef";
let short = "foobar";
let long = "foobar".repeat(100);
let mix = long.clone() + escapes + short + &long;
let mix_escaped = long.clone() + escaped + short + &long;
let mix_2 = long.repeat(3) + &escapes.repeat(3) + short + &escapes.repeat(2) + &long;
let mix_escaped_2 =
long.repeat(3) + &escaped.repeat(3) + short + &escaped.repeat(2) + &long;
let mut buf = BytesMut::new();
b_escape(empty.as_bytes(), &mut buf);
assert_eq!(buf.len(), 0);
b_escape(short.as_bytes(), &mut buf);
assert_eq!(buf.as_ref(), short.as_bytes());
let mut buf = BytesMut::new();
b_escape(long.as_bytes(), &mut buf);
assert_eq!(buf.as_ref(), long.as_bytes());
let mut buf = BytesMut::new();
b_escape(escapes.as_bytes(), &mut buf);
assert_eq!(buf.as_ref(), escaped.as_bytes());
let mut buf = BytesMut::new();
b_escape(mix.as_bytes(), &mut buf);
assert_eq!(buf.as_ref(), mix_escaped.as_bytes());
let mut buf = BytesMut::new();
b_escape(mix_2.as_bytes(), &mut buf);
assert_eq!(buf.as_ref(), mix_escaped_2.as_bytes());
let mut buf = BytesMut::with_capacity(4);
for c in escapes.chars() {
b_escape_char(c, &mut buf);
}
assert_eq!(buf.as_ref(), escaped.as_bytes());
let mut buf = BytesMut::with_capacity(0);
b_escape_char('\u{3A3}', &mut buf);
assert_eq!(buf.as_ref(), "\u{3A3}".as_bytes())
}
}
mod bytes_buff_nosimd {
v_escape::new!(
MyE,
"65->a || 60->b || 61->c || 66->d || 80->e || 81->f",
simd = false
);
#[test]
fn test_escape() {
use bytes::BytesMut;
let empty = "";
let escapes = "<=ABPQ";
let escaped = "bcadef";
let short = "foobar";
let long = "foobar".repeat(100);
let mix = long.clone() + escapes + short + &long;
let mix_escaped = long.clone() + escaped + short + &long;
let mix_2 = long.repeat(3) + &escapes.repeat(3) + short + &escapes.repeat(2) + &long;
let mix_escaped_2 =
long.repeat(3) + &escaped.repeat(3) + short + &escaped.repeat(2) + &long;
let mut buf = BytesMut::new();
b_escape(empty.as_bytes(), &mut buf);
assert_eq!(buf.len(), 0);
b_escape(short.as_bytes(), &mut buf);
assert_eq!(buf.as_ref(), short.as_bytes());
let mut buf = BytesMut::new();
b_escape(long.as_bytes(), &mut buf);
assert_eq!(buf.as_ref(), long.as_bytes());
let mut buf = BytesMut::new();
b_escape(escapes.as_bytes(), &mut buf);
assert_eq!(buf.as_ref(), escaped.as_bytes());
let mut buf = BytesMut::new();
b_escape(mix.as_bytes(), &mut buf);
assert_eq!(buf.as_ref(), mix_escaped.as_bytes());
let mut buf = BytesMut::new();
b_escape(mix_2.as_bytes(), &mut buf);
assert_eq!(buf.as_ref(), mix_escaped_2.as_bytes());
let mut buf = BytesMut::with_capacity(4);
for c in escapes.chars() {
b_escape_char(c, &mut buf);
}
assert_eq!(buf.as_ref(), escaped.as_bytes());
let mut buf = BytesMut::with_capacity(0);
b_escape_char('\u{3A3}', &mut buf);
assert_eq!(buf.as_ref(), "\u{3A3}".as_bytes())
}
}
mod no_simd {
mod a {
v_escape::new!(
MyE,
"65->a || 60->b || 61->c || 66->d || 80->e || 81->f",
simd = false
);
#[test]
fn test_escape() {
test!(MyE, "<=ABPQ", "bcadef");
test_ptr!("<=ABPQ", "bcadef");
}
}
mod b {
v_escape::new!(
MyE,
"65->a || 60->b || 61->c || 66->d || 80->e || 81->f",
simd = false
);
#[test]
fn test_escape() {
test!(MyE, "<=ABPQ", "bcadef");
test_ptr!("<=ABPQ", "bcadef");
}
}
}
#[cfg(target_arch = "x86_64")]
mod no_avx {
mod a {
v_escape::new!(
MyE,
"65->a || 60->b || 61->c || 66->d || 80->e || 81->f",
avx = false
);
#[test]
fn test_escape() {
test!(MyE, "<=ABPQ", "bcadef");
test_ptr!("<=ABPQ", "bcadef");
}
}
mod b {
v_escape::new!(
MyE,
"65->a || 60->b || 61->c || 66->d || 80->e || 81->f",
avx = false
);
#[test]
fn test_escape() {
test!(MyE, "<=ABPQ", "bcadef");
test_ptr!("<=ABPQ", "bcadef");
}
}
}
mod empty {
v_escape::new!(MyE, "65->");
#[test]
fn test_escape() {
test!(MyE, "A", "");
test_ptr!("A", "");
}
}
#[cfg(target_arch = "x86_64")]
mod test_avx {
mod numbers {
v_escape::new!(
MyE,
"#0->zero || #1->one || #2->two || #3->three || #4->four || #5->five || \
#6->six || #7->seven || #8->eight || #9->nine"
);
#[test]
fn test_escape_a() {
test!(
MyE,
"0123456789",
"zeroonetwothreefourfivesixseveneightnine"
);
test_ptr!("0123456789", "zeroonetwothreefourfivesixseveneightnine");
}
#[test]
fn test_escape_b() {
test!(
MyE,
"0 1-2 3-4 56789",
"zero one-two three-four fivesixseveneightnine"
);
test_ptr!(
"0 1-2 3-4 56789",
"zero one-two three-four fivesixseveneightnine"
);
}
}
mod a {
// 3 ranges
v_escape::new!(MyE, "65->a || 60->b || 61->c || 66->d || 80->e || 81->f");
#[test]
fn test_escape() {
test!(MyE, "<=ABPQ", "bcadef");
test_ptr!("<=ABPQ", "bcadef");
}
}
mod b {
// 2 ranges and 1 escape
v_escape::new!(MyE, "60->a || 61->b || 65->c || 80->d || 81->e");
#[test]
fn test_escape() {
test!(MyE, "<=APQ", "abcde");
test_ptr!("<=APQ", "abcde");
}
}
mod c {
// 1 range and 2 escapes
v_escape::new!(MyE, "60->a || 65->c || 80->d || 62->e");
#[test]
fn test_escape() {
test!(MyE, "<>AP", "aecd");
test_ptr!("<>AP", "aecd");
}
}
mod d {
// 3 escapes
v_escape::new!(MyE, "60->a || 80->b || 65->c");
#[test]
fn test_escape() {
test!(MyE, "<AP", "acb");
test_ptr!("<AP", "acb");
}
}
mod e {
// 2 ranges
v_escape::new!(MyE, "60->a || 61->b || 81->c || 80->d || 62->e");
#[test]
fn test_escape() {
test!(MyE, "<=>PQ", "abedc");
test_ptr!("<=>PQ", "abedc");
}
}
mod f {
// 1 range and 1 escape
v_escape::new!(MyE, "60->a || 61->b || 80->c || 62->d");
#[test]
fn test_escape() {
test!(MyE, "<=>P", "abdc");
test_ptr!("<=>P", "abdc");
}
}
mod g {
// 2 escapes
v_escape::new!(MyE, "60->a || 80->b");
#[test]
fn test_escape() {
test!(MyE, "<P", "ab");
test_ptr!("<P", "ab");
}
}
mod h {
// 1 range
v_escape::new!(MyE, "60->a || 61->b");
#[test]
fn test_escape() {
test!(MyE, "<=", "ab");
test_ptr!("<=", "ab");
}
}
mod i {
// 1 escapes
v_escape::new!(MyE, "60->f");
#[test]
fn test_escape() {
test!(MyE, "<", "f");
test_ptr!("<", "f");
}
}
}
mod char_syntax {
mod a {
v_escape::new!(MyE, " ->f");
#[test]
fn test_escape() {
test!(MyE, " ", "f");
test_ptr!(" ", "f");
}
}
}
| 30.711434 | 93 | 0.467793 |
c11b6899b4821ff92e18bd27f97c2a3e9d3952e8 | 517 | //! # oso policy engine for authorization
//!
//! TODO: API documentation
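//!
//! A minimal usage sketch (hypothetical; `new` and `load_str` are assumed
//! entry points and may not match this crate's eventual API):
//!
//! ```text
//! let mut oso = Oso::new();
//! oso.load_str(r#"allow(actor, _action, _resource) if actor = "admin";"#)?;
//! ```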
#[macro_use]
pub mod macros;
pub(crate) mod builtins;
mod errors;
mod host;
mod oso;
mod query;
pub use crate::oso::Oso;
pub use errors::{OsoError, Result};
pub use host::{Class, FromPolar, HostClass, ToPolar};
pub use polar_core::{polar::Polar, terms::Value};
pub use query::{Query, ResultSet};
pub trait PolarClass {
fn get_polar_class() -> Class<()>;
fn get_polar_class_builder() -> Class<Self>
where
Self: Sized;
}
| 19.884615 | 53 | 0.682785 |
fb21eb0356161b66859ce2703ac22a2a567230b7 | 1,297 | /*
* scaledjobs.keda.sh
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 1
*
* Generated by: https://openapi-generator.tech
*/
/// ScaledJobJobTargetRefTemplateSpecRbdSecretRef : SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default, JsonSchema)]
pub struct ScaledJobJobTargetRefTemplateSpecRbdSecretRef {
/// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
#[serde(rename = "name", skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
impl ScaledJobJobTargetRefTemplateSpecRbdSecretRef {
/// SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
pub fn new() -> ScaledJobJobTargetRefTemplateSpecRbdSecretRef {
ScaledJobJobTargetRefTemplateSpecRbdSecretRef {
name: None,
}
}
}
| 40.53125 | 229 | 0.74788 |
fe7697447acda8a897a3f7af36cc0e92c80672df | 1,088 | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod doc;
pub mod macros;
pub mod inline;
pub mod monomorphize;
pub mod controlflow;
pub mod glue;
pub mod datum;
pub mod callee;
pub mod expr;
pub mod common;
pub mod context;
pub mod consts;
pub mod type_of;
pub mod build;
pub mod builder;
pub mod base;
pub mod _match;
pub mod closure;
pub mod tvec;
pub mod meth;
pub mod cabi;
pub mod cabi_x86;
pub mod cabi_x86_64;
pub mod cabi_x86_win64;
pub mod cabi_arm;
pub mod cabi_mips;
pub mod foreign;
pub mod intrinsic;
pub mod debuginfo;
pub mod machine;
pub mod adt;
pub mod asm;
pub mod type_;
pub mod value;
pub mod basic_block;
pub mod llrepr;
pub mod cleanup;
| 22.666667 | 69 | 0.753676 |
feca2dceb70f85b7b6eaba7d648b795c5725aa40 | 24,421 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Noise Socket
use futures::{
future::poll_fn,
io::{AsyncRead, AsyncWrite},
ready, try_ready,
};
use logger::prelude::*;
use std::{
convert::TryInto,
io,
pin::Pin,
task::{Context, Poll},
};
const MAX_PAYLOAD_LENGTH: usize = u16::max_value() as usize; // 65535
// The maximum number of bytes that we can buffer is 16 bytes less than u16::max_value() because
// encrypted messages include a tag along with the payload.
const MAX_WRITE_BUFFER_LENGTH: usize = u16::max_value() as usize - 16; // 65519
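// Sanity sketch (assumption spelled out, not in the original): the 16-byte gap
// between the two constants is room for the AEAD tag that snow appends to each
// encrypted frame, so a full write buffer still encrypts into a single frame.
#[allow(dead_code)]
const _TAG_ROOM_CHECK: [(); 1] =
[(); (MAX_WRITE_BUFFER_LENGTH + 16 == MAX_PAYLOAD_LENGTH) as usize];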
/// Collection of buffers used for buffering data during the various read/write states of a
/// NoiseSocket
struct NoiseBuffers {
/// Encrypted frame read from the wire
read_encrypted: [u8; MAX_PAYLOAD_LENGTH],
/// Decrypted data read from the wire (produced by having snow decrypt the `read_encrypted`
/// buffer)
read_decrypted: [u8; MAX_PAYLOAD_LENGTH],
/// Unencrypted data intended to be written to the wire
write_decrypted: [u8; MAX_WRITE_BUFFER_LENGTH],
/// Encrypted data to write to the wire (produced by having snow encrypt the `write_decrypted`
/// buffer)
write_encrypted: [u8; MAX_PAYLOAD_LENGTH],
}
impl NoiseBuffers {
fn new() -> Self {
Self {
read_encrypted: [0; MAX_PAYLOAD_LENGTH],
read_decrypted: [0; MAX_PAYLOAD_LENGTH],
write_decrypted: [0; MAX_WRITE_BUFFER_LENGTH],
write_encrypted: [0; MAX_PAYLOAD_LENGTH],
}
}
}
/// Hand written Debug implementation in order to omit the printing of huge buffers of data
impl ::std::fmt::Debug for NoiseBuffers {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
f.debug_struct("NoiseBuffers").finish()
}
}
/// Possible read states for a [NoiseSocket]
#[derive(Debug)]
enum ReadState {
/// Initial State
Init,
/// Read frame length
ReadFrameLen { buf: [u8; 2], offset: usize },
/// Read encrypted frame
ReadFrame { frame_len: u16, offset: usize },
/// Copy decrypted frame to provided buffer
CopyDecryptedFrame { decrypted_len: usize, offset: usize },
/// End of file reached, result indicated if EOF was expected or not
Eof(Result<(), ()>),
/// Decryption Error
DecryptionError(snow::SnowError),
}
/// Possible write states for a [NoiseSocket]
#[derive(Debug)]
enum WriteState {
/// Initial State
Init,
/// Buffer provided data
BufferData { offset: usize },
/// Write frame length to the wire
WriteFrameLen {
frame_len: u16,
buf: [u8; 2],
offset: usize,
},
/// Write encrypted frame to the wire
WriteEncryptedFrame { frame_len: u16, offset: usize },
/// Flush the underlying socket
Flush,
/// End of file reached
Eof,
/// Encryption Error
EncryptionError(snow::SnowError),
}
/// A Noise session with a remote
///
/// Encrypts data to be written to and decrypts data that is read from the underlying socket using
/// the noise protocol. This is done by wrapping noise payloads in u16 (big endian) length prefix
/// frames.
#[derive(Debug)]
pub struct NoiseSocket<TSocket> {
socket: TSocket,
session: snow::Session,
buffers: Box<NoiseBuffers>,
read_state: ReadState,
write_state: WriteState,
}
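// Wire format sketch (illustration derived from the state machines below):
//
// +----------------+--------------------------------------------+
// | len: u16 (BE) | len bytes of noise ciphertext (incl. tag) |
// +----------------+--------------------------------------------+
//
// A zero-length frame is legal on read and simply resets the read state.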
impl<TSocket> NoiseSocket<TSocket> {
fn new(socket: TSocket, session: snow::Session) -> Self {
Self {
socket,
session,
buffers: Box::new(NoiseBuffers::new()),
read_state: ReadState::Init,
write_state: WriteState::Init,
}
}
/// Pull out the static public key of the remote
pub fn get_remote_static(&self) -> Option<&[u8]> {
self.session.get_remote_static()
}
}
fn poll_write_all<TSocket>(
mut context: &mut Context,
mut socket: Pin<&mut TSocket>,
buf: &[u8],
offset: &mut usize,
) -> Poll<io::Result<()>>
where
TSocket: AsyncWrite,
{
loop {
let n = ready!(socket.as_mut().poll_write(&mut context, &buf[*offset..]))?;
trace!("poll_write_all: wrote {}/{} bytes", *offset + n, buf.len());
if n == 0 {
return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
}
*offset += n;
assert!(*offset <= buf.len());
if *offset == buf.len() {
return Poll::Ready(Ok(()));
}
}
}
/// Read a u16 frame length from `socket`.
///
/// Can result in the following output:
/// 1) Ok(None) => EOF; remote graceful shutdown
/// 2) Err(UnexpectedEOF) => read 1 byte then hit EOF; remote died
/// 3) Ok(Some(n)) => new frame of length n
fn poll_read_u16frame_len<TSocket>(
context: &mut Context,
socket: Pin<&mut TSocket>,
buf: &mut [u8; 2],
offset: &mut usize,
) -> Poll<io::Result<Option<u16>>>
where
TSocket: AsyncRead,
{
match ready!(poll_read_exact(context, socket, buf, offset)) {
Ok(()) => Poll::Ready(Ok(Some(u16::from_be_bytes(*buf)))),
Err(e) => {
if *offset == 0 && e.kind() == io::ErrorKind::UnexpectedEof {
return Poll::Ready(Ok(None));
}
Poll::Ready(Err(e))
}
}
}
fn poll_read_exact<TSocket>(
mut context: &mut Context,
mut socket: Pin<&mut TSocket>,
buf: &mut [u8],
offset: &mut usize,
) -> Poll<io::Result<()>>
where
TSocket: AsyncRead,
{
loop {
let n = ready!(socket.as_mut().poll_read(&mut context, &mut buf[*offset..]))?;
trace!("poll_read_exact: read {}/{} bytes", *offset + n, buf.len());
if n == 0 {
return Poll::Ready(Err(io::ErrorKind::UnexpectedEof.into()));
}
*offset += n;
assert!(*offset <= buf.len());
if *offset == buf.len() {
return Poll::Ready(Ok(()));
}
}
}
impl<TSocket> NoiseSocket<TSocket>
where
TSocket: AsyncRead + Unpin,
{
fn poll_read(&mut self, mut context: &mut Context, buf: &mut [u8]) -> Poll<io::Result<usize>> {
loop {
trace!("NoiseSocket ReadState::{:?}", self.read_state);
match self.read_state {
ReadState::Init => {
self.read_state = ReadState::ReadFrameLen {
buf: [0, 0],
offset: 0,
};
}
ReadState::ReadFrameLen {
ref mut buf,
ref mut offset,
} => {
match ready!(poll_read_u16frame_len(
&mut context,
Pin::new(&mut self.socket),
buf,
offset
)) {
Ok(Some(frame_len)) => {
// Empty Frame
if frame_len == 0 {
self.read_state = ReadState::Init;
} else {
self.read_state = ReadState::ReadFrame {
frame_len,
offset: 0,
};
}
}
Ok(None) => {
self.read_state = ReadState::Eof(Ok(()));
}
Err(e) => {
if e.kind() == io::ErrorKind::UnexpectedEof {
self.read_state = ReadState::Eof(Err(()));
}
return Poll::Ready(Err(e));
}
}
}
ReadState::ReadFrame {
frame_len,
ref mut offset,
} => {
match ready!(poll_read_exact(
&mut context,
Pin::new(&mut self.socket),
&mut self.buffers.read_encrypted[..(frame_len as usize)],
offset
)) {
Ok(()) => {
match self.session.read_message(
&self.buffers.read_encrypted[..(frame_len as usize)],
&mut self.buffers.read_decrypted,
) {
Ok(decrypted_len) => {
self.read_state = ReadState::CopyDecryptedFrame {
decrypted_len,
offset: 0,
};
}
Err(e) => {
error!("Decryption Error: {}", e);
self.read_state = ReadState::DecryptionError(e);
}
}
}
Err(e) => {
if e.kind() == io::ErrorKind::UnexpectedEof {
self.read_state = ReadState::Eof(Err(()));
}
return Poll::Ready(Err(e));
}
}
}
ReadState::CopyDecryptedFrame {
decrypted_len,
ref mut offset,
} => {
let bytes_to_copy =
::std::cmp::min(decrypted_len as usize - *offset, buf.len());
buf[..bytes_to_copy].copy_from_slice(
&self.buffers.read_decrypted[*offset..(*offset + bytes_to_copy)],
);
trace!(
"CopyDecryptedFrame: copied {}/{} bytes",
*offset + bytes_to_copy,
decrypted_len
);
*offset += bytes_to_copy;
if *offset == decrypted_len as usize {
self.read_state = ReadState::Init;
}
return Poll::Ready(Ok(bytes_to_copy));
}
ReadState::Eof(Ok(())) => return Poll::Ready(Ok(0)),
ReadState::Eof(Err(())) => {
return Poll::Ready(Err(io::ErrorKind::UnexpectedEof.into()))
}
ReadState::DecryptionError(ref e) => {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("DecryptionError: {}", e),
)))
}
}
}
}
}
impl<TSocket> AsyncRead for NoiseSocket<TSocket>
where
TSocket: AsyncRead + Unpin,
{
fn poll_read(
self: Pin<&mut Self>,
context: &mut Context,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
self.get_mut().poll_read(context, buf)
}
}
impl<TSocket> NoiseSocket<TSocket>
where
TSocket: AsyncWrite + Unpin,
{
fn poll_write_or_flush(
&mut self,
mut context: &mut Context,
buf: Option<&[u8]>,
) -> Poll<io::Result<Option<usize>>> {
loop {
trace!(
"NoiseSocket {} WriteState::{:?}",
if buf.is_some() {
"poll_write"
} else {
"poll_flush"
},
self.write_state,
);
match self.write_state {
WriteState::Init => {
if buf.is_some() {
self.write_state = WriteState::BufferData { offset: 0 };
} else {
return Poll::Ready(Ok(None));
}
}
WriteState::BufferData { ref mut offset } => {
let bytes_buffered = if let Some(buf) = buf {
let bytes_to_copy =
::std::cmp::min(MAX_WRITE_BUFFER_LENGTH - *offset, buf.len());
self.buffers.write_decrypted[*offset..(*offset + bytes_to_copy)]
.copy_from_slice(&buf[..bytes_to_copy]);
trace!("BufferData: buffered {}/{} bytes", bytes_to_copy, buf.len());
*offset += bytes_to_copy;
Some(bytes_to_copy)
} else {
None
};
if buf.is_none() || *offset == MAX_WRITE_BUFFER_LENGTH {
match self.session.write_message(
&self.buffers.write_decrypted[..*offset],
&mut self.buffers.write_encrypted,
) {
Ok(encrypted_len) => {
let frame_len = encrypted_len
.try_into()
.expect("offset should be able to fit in u16");
self.write_state = WriteState::WriteFrameLen {
frame_len,
buf: u16::to_be_bytes(frame_len),
offset: 0,
};
}
Err(e) => {
error!("Encryption Error: {}", e);
let err = io::Error::new(
io::ErrorKind::InvalidData,
format!("EncryptionError: {}", e),
);
self.write_state = WriteState::EncryptionError(e);
return Poll::Ready(Err(err));
}
}
}
if let Some(bytes_buffered) = bytes_buffered {
return Poll::Ready(Ok(Some(bytes_buffered)));
}
}
WriteState::WriteFrameLen {
frame_len,
ref buf,
ref mut offset,
} => {
match ready!(poll_write_all(
&mut context,
Pin::new(&mut self.socket),
buf,
offset
)) {
Ok(()) => {
self.write_state = WriteState::WriteEncryptedFrame {
frame_len,
offset: 0,
};
}
Err(e) => {
if e.kind() == io::ErrorKind::WriteZero {
self.write_state = WriteState::Eof;
}
return Poll::Ready(Err(e));
}
}
}
WriteState::WriteEncryptedFrame {
frame_len,
ref mut offset,
} => {
match ready!(poll_write_all(
&mut context,
Pin::new(&mut self.socket),
&self.buffers.write_encrypted[..(frame_len as usize)],
offset
)) {
Ok(()) => {
self.write_state = WriteState::Flush;
}
Err(e) => {
if e.kind() == io::ErrorKind::WriteZero {
self.write_state = WriteState::Eof;
}
return Poll::Ready(Err(e));
}
}
}
WriteState::Flush => {
try_ready!(Pin::new(&mut self.socket).poll_flush(&mut context));
self.write_state = WriteState::Init;
}
WriteState::Eof => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())),
WriteState::EncryptionError(ref e) => {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidData,
format!("EncryptionError: {}", e),
)))
}
}
}
}
fn poll_write(&mut self, context: &mut Context, buf: &[u8]) -> Poll<io::Result<usize>> {
if let Some(bytes_written) = try_ready!(self.poll_write_or_flush(context, Some(buf))) {
Poll::Ready(Ok(bytes_written))
} else {
unreachable!();
}
}
fn poll_flush(&mut self, context: &mut Context) -> Poll<io::Result<()>> {
if try_ready!(self.poll_write_or_flush(context, None)).is_none() {
Poll::Ready(Ok(()))
} else {
unreachable!();
}
}
}
impl<TSocket> AsyncWrite for NoiseSocket<TSocket>
where
TSocket: AsyncWrite + Unpin,
{
fn poll_write(
self: Pin<&mut Self>,
context: &mut Context,
buf: &[u8],
) -> Poll<io::Result<usize>> {
self.get_mut().poll_write(context, buf)
}
fn poll_flush(self: Pin<&mut Self>, context: &mut Context) -> Poll<io::Result<()>> {
self.get_mut().poll_flush(context)
}
fn poll_close(mut self: Pin<&mut Self>, context: &mut Context) -> Poll<io::Result<()>> {
Pin::new(&mut self.socket).poll_close(context)
}
}
/// Represents a noise session which still needs to have a handshake performed.
pub(super) struct Handshake<TSocket>(NoiseSocket<TSocket>);
impl<TSocket> Handshake<TSocket> {
/// Build a new `Handshake` struct given a socket and a new snow Session
pub fn new(socket: TSocket, session: snow::Session) -> Self {
let noise_socket = NoiseSocket::new(socket, session);
Self(noise_socket)
}
}
impl<TSocket> Handshake<TSocket>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
/// Perform a Single Round-Trip noise IX handshake returning the underlying [NoiseSocket]
/// (switched to transport mode) upon success.
pub async fn handshake_1rt(mut self) -> io::Result<NoiseSocket<TSocket>> {
// The Dialer
if self.0.session.is_initiator() {
// -> e, s
self.send().await?;
self.flush().await?;
// <- e, ee, se, s, es
self.receive().await?;
} else {
// -> e, s
self.receive().await?;
// <- e, ee, se, s, es
self.send().await?;
self.flush().await?;
}
self.finish()
}
/// Send handshake message to remote.
async fn send(&mut self) -> io::Result<()> {
poll_fn(|context| self.0.poll_write(context, &[]))
.await
.map(|_| ())
}
/// Flush handshake message to remote.
async fn flush(&mut self) -> io::Result<()> {
poll_fn(|context| self.0.poll_flush(context)).await
}
/// Receive handshake message from remote.
async fn receive(&mut self) -> io::Result<()> {
poll_fn(|context| self.0.poll_read(context, &mut []))
.await
.map(|_| ())
}
/// Finish the handshake.
///
/// Converts the noise session into transport mode and returns the NoiseSocket.
fn finish(self) -> io::Result<NoiseSocket<TSocket>> {
let session = self
.0
.session
.into_transport_mode()
.map_err(|e| io::Error::new(io::ErrorKind::Other, format!("Noise error: {}", e)))?;
Ok(NoiseSocket { session, ..self.0 })
}
}
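// Usage sketch (hypothetical caller, not part of this module's tests):
//
// let session = snow::Builder::new(params).build_initiator()?;
// let socket = Handshake::new(io, session).handshake_1rt().await?;
//
// After `handshake_1rt` resolves, the returned NoiseSocket is in transport
// mode and is driven through its AsyncRead/AsyncWrite implementations.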
#[cfg(test)]
mod test {
use crate::{
socket::{Handshake, NoiseSocket, MAX_PAYLOAD_LENGTH},
NOISE_IX_PARAMETER,
};
use futures::{
executor::block_on,
future::join,
io::{AsyncReadExt, AsyncWriteExt},
};
use memsocket::MemorySocket;
use snow::{params::NoiseParams, Builder, Keypair, SnowError};
use std::io;
fn build_test_connection() -> Result<
(
(Keypair, Handshake<MemorySocket>),
(Keypair, Handshake<MemorySocket>),
),
SnowError,
> {
let parameters: NoiseParams = NOISE_IX_PARAMETER.parse().expect("Invalid protocol name");
let dialer_keypair = Builder::new(parameters.clone()).generate_keypair()?;
let listener_keypair = Builder::new(parameters.clone()).generate_keypair()?;
let dialer_session = Builder::new(parameters.clone())
.local_private_key(&dialer_keypair.private)
.build_initiator()?;
let listener_session = Builder::new(parameters.clone())
.local_private_key(&listener_keypair.private)
.build_responder()?;
let (dialer_socket, listener_socket) = MemorySocket::new_pair();
let (dialer, listener) = (
NoiseSocket::new(dialer_socket, dialer_session),
NoiseSocket::new(listener_socket, listener_session),
);
Ok((
(dialer_keypair, Handshake(dialer)),
(listener_keypair, Handshake(listener)),
))
}
fn perform_handshake(
dialer: Handshake<MemorySocket>,
listener: Handshake<MemorySocket>,
) -> io::Result<(NoiseSocket<MemorySocket>, NoiseSocket<MemorySocket>)> {
let (dialer_result, listener_result) =
block_on(join(dialer.handshake_1rt(), listener.handshake_1rt()));
Ok((dialer_result?, listener_result?))
}
#[test]
fn test_handshake() {
let ((dialer_keypair, dialer), (listener_keypair, listener)) =
build_test_connection().unwrap();
let (dialer_socket, listener_socket) = perform_handshake(dialer, listener).unwrap();
assert_eq!(
dialer_socket.get_remote_static(),
Some(listener_keypair.public.as_ref())
);
assert_eq!(
listener_socket.get_remote_static(),
Some(dialer_keypair.public.as_ref())
);
}
#[test]
fn simple_test() -> io::Result<()> {
let ((_dialer_keypair, dialer), (_listener_keypair, listener)) =
build_test_connection().unwrap();
let (mut dialer_socket, mut listener_socket) = perform_handshake(dialer, listener)?;
block_on(dialer_socket.write_all(b"stormlight"))?;
block_on(dialer_socket.write_all(b" "))?;
block_on(dialer_socket.write_all(b"archive"))?;
block_on(dialer_socket.flush())?;
block_on(dialer_socket.close())?;
let mut buf = Vec::new();
block_on(listener_socket.read_to_end(&mut buf))?;
assert_eq!(buf, b"stormlight archive");
Ok(())
}
#[test]
fn interleaved_writes() -> io::Result<()> {
let ((_dialer_keypair, dialer), (_listener_keypair, listener)) =
build_test_connection().unwrap();
let (mut a, mut b) = perform_handshake(dialer, listener)?;
block_on(a.write_all(b"The Name of the Wind"))?;
block_on(a.flush())?;
block_on(a.write_all(b"The Wise Man's Fear"))?;
block_on(a.flush())?;
block_on(b.write_all(b"The Doors of Stone"))?;
block_on(b.flush())?;
let mut buf = [0; 20];
block_on(b.read_exact(&mut buf))?;
assert_eq!(&buf, b"The Name of the Wind");
let mut buf = [0; 19];
block_on(b.read_exact(&mut buf))?;
assert_eq!(&buf, b"The Wise Man's Fear");
let mut buf = [0; 18];
block_on(a.read_exact(&mut buf))?;
assert_eq!(&buf, b"The Doors of Stone");
Ok(())
}
#[test]
fn u16_max_writes() -> io::Result<()> {
let ((_dialer_keypair, dialer), (_listener_keypair, listener)) =
build_test_connection().unwrap();
let (mut a, mut b) = perform_handshake(dialer, listener)?;
let buf_send = [1; MAX_PAYLOAD_LENGTH];
block_on(a.write_all(&buf_send))?;
block_on(a.flush())?;
let mut buf_receive = [0; MAX_PAYLOAD_LENGTH];
block_on(b.read_exact(&mut buf_receive))?;
assert_eq!(&buf_receive[..], &buf_send[..]);
Ok(())
}
}
| 34.251052 | 99 | 0.483682 |
8ad8e41bb16da80699c000a355fd9b091ef7c1af | 1,553 | // NB: If you change this test, change 'stmt_expr_attributes-feature-gate.rs' at the same time.
// proc_macro_hygiene
// Tracking issue: https://github.com/rust-lang/rust/issues/54727
#![feature(proc_macro_hygiene)]
// stmt_expr_attributes
// Tracking issue: https://github.com/rust-lang/rust/issues/15701
#![feature(stmt_expr_attributes)]
use pin_project::{pin_project, project};
use std::pin::Pin;
fn project_stmt_expr_nightly() {
#[pin_project]
enum Baz<A, B, C, D> {
Variant1(#[pin] A, B),
Variant2 {
#[pin]
field1: C,
field2: D,
},
None,
}
let mut baz = Baz::Variant1(1, 2);
let mut baz = Pin::new(&mut baz).project();
#[project]
match &mut baz {
Baz::Variant1(x, y) => {
let x: &mut Pin<&mut i32> = x;
assert_eq!(**x, 1);
let y: &mut &mut i32 = y;
assert_eq!(**y, 2);
}
Baz::Variant2 { field1, field2 } => {
let _x: &mut Pin<&mut i32> = field1;
let _y: &mut &mut i32 = field2;
}
Baz::None => {}
}
let () = #[project]
match &mut baz {
Baz::Variant1(x, y) => {
let x: &mut Pin<&mut i32> = x;
assert_eq!(**x, 1);
let y: &mut &mut i32 = y;
assert_eq!(**y, 2);
}
Baz::Variant2 { field1, field2 } => {
let _x: &mut Pin<&mut i32> = field1;
let _y: &mut &mut i32 = field2;
}
Baz::None => {}
};
}
fn main() {}
| 24.650794 | 95 | 0.496458 |
237b410e073d6f886b617bfc40f30434b16c7cd7 | 326 | #![feature(const_generics)]
//~^ WARN the feature `const_generics` is incomplete
struct Const<const P: *const u32>; //~ ERROR: using raw pointers as const generic parameters
fn main() {
let _: Const<{ 15 as *const _ }> = Const::<{ 10 as *const _ }>;
let _: Const<{ 10 as *const _ }> = Const::<{ 10 as *const _ }>;
}
| 32.6 | 92 | 0.628834 |
f747f701f893d6f710c60a61dbd9357fe3ff3686 | 16,142 | //! Websockets client
//!
//! Type definitions required to use [`awc::Client`](super::Client) as a WebSocket client.
//!
//! # Example
//!
//! ```no_run
//! use awc::{Client, ws};
//! use futures_util::{sink::SinkExt, stream::StreamExt};
//!
//! #[actix_rt::main]
//! async fn main() {
//! let (_resp, mut connection) = Client::new()
//! .ws("ws://echo.websocket.org")
//! .connect()
//! .await
//! .unwrap();
//!
//! connection
//! .send(ws::Message::Text("Echo".into()))
//! .await
//! .unwrap();
//! let response = connection.next().await.unwrap().unwrap();
//!
//! assert_eq!(response, ws::Frame::Text("Echo".as_bytes().into()));
//! }
//! ```
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::rc::Rc;
use std::{fmt, str};
use actix_codec::Framed;
use actix_http::cookie::{Cookie, CookieJar};
use actix_http::{ws, Payload, RequestHead};
use actix_rt::time::timeout;
pub use actix_http::ws::{CloseCode, CloseReason, Codec, Frame, Message};
use crate::connect::BoxedSocket;
use crate::error::{InvalidUrl, SendRequestError, WsClientError};
use crate::http::header::{
self, HeaderName, HeaderValue, IntoHeaderValue, AUTHORIZATION,
};
use crate::http::{
ConnectionType, Error as HttpError, Method, StatusCode, Uri, Version,
};
use crate::response::ClientResponse;
use crate::ClientConfig;
/// `WebSocket` connection
pub struct WebsocketsRequest {
pub(crate) head: RequestHead,
err: Option<HttpError>,
origin: Option<HeaderValue>,
protocols: Option<String>,
addr: Option<SocketAddr>,
max_size: usize,
server_mode: bool,
cookies: Option<CookieJar>,
config: Rc<ClientConfig>,
}
impl WebsocketsRequest {
/// Create new websocket connection
pub(crate) fn new<U>(uri: U, config: Rc<ClientConfig>) -> Self
where
Uri: TryFrom<U>,
<Uri as TryFrom<U>>::Error: Into<HttpError>,
{
let mut err = None;
#[allow(clippy::field_reassign_with_default)]
let mut head = {
let mut head = RequestHead::default();
head.method = Method::GET;
head.version = Version::HTTP_11;
head
};
match Uri::try_from(uri) {
Ok(uri) => head.uri = uri,
Err(e) => err = Some(e.into()),
}
WebsocketsRequest {
head,
err,
config,
addr: None,
origin: None,
protocols: None,
max_size: 65_536,
server_mode: false,
cookies: None,
}
}
/// Set socket address of the server.
///
/// This address is used for connection. If address is not
/// provided url's host name get resolved.
pub fn address(mut self, addr: SocketAddr) -> Self {
self.addr = Some(addr);
self
}
/// Set supported websocket protocols
pub fn protocols<U, V>(mut self, protos: U) -> Self
where
U: IntoIterator<Item = V>,
V: AsRef<str>,
{
let mut protos = protos
.into_iter()
.fold(String::new(), |acc, s| acc + s.as_ref() + ",");
protos.pop();
self.protocols = Some(protos);
self
}
/// Set a cookie
pub fn cookie(mut self, cookie: Cookie<'_>) -> Self {
if self.cookies.is_none() {
let mut jar = CookieJar::new();
jar.add(cookie.into_owned());
self.cookies = Some(jar)
} else {
self.cookies.as_mut().unwrap().add(cookie.into_owned());
}
self
}
/// Set request Origin
pub fn origin<V, E>(mut self, origin: V) -> Self
where
HeaderValue: TryFrom<V, Error = E>,
HttpError: From<E>,
{
match HeaderValue::try_from(origin) {
Ok(value) => self.origin = Some(value),
Err(e) => self.err = Some(e.into()),
}
self
}
/// Set max frame size
///
/// By default max size is set to 64kB
pub fn max_frame_size(mut self, size: usize) -> Self {
self.max_size = size;
self
}
/// Disable payload masking. By default ws client masks frame payload.
pub fn server_mode(mut self) -> Self {
self.server_mode = true;
self
}
/// Append a header.
///
/// Header gets appended to existing header.
/// To override header use `set_header()` method.
pub fn header<K, V>(mut self, key: K, value: V) -> Self
where
HeaderName: TryFrom<K>,
<HeaderName as TryFrom<K>>::Error: Into<HttpError>,
V: IntoHeaderValue,
{
match HeaderName::try_from(key) {
Ok(key) => match value.try_into() {
Ok(value) => {
self.head.headers.append(key, value);
}
Err(e) => self.err = Some(e.into()),
},
Err(e) => self.err = Some(e.into()),
}
self
}
/// Insert a header, replaces existing header.
pub fn set_header<K, V>(mut self, key: K, value: V) -> Self
where
HeaderName: TryFrom<K>,
<HeaderName as TryFrom<K>>::Error: Into<HttpError>,
V: IntoHeaderValue,
{
match HeaderName::try_from(key) {
Ok(key) => match value.try_into() {
Ok(value) => {
self.head.headers.insert(key, value);
}
Err(e) => self.err = Some(e.into()),
},
Err(e) => self.err = Some(e.into()),
}
self
}
/// Insert a header only if it is not yet set.
pub fn set_header_if_none<K, V>(mut self, key: K, value: V) -> Self
where
HeaderName: TryFrom<K>,
<HeaderName as TryFrom<K>>::Error: Into<HttpError>,
V: IntoHeaderValue,
{
match HeaderName::try_from(key) {
Ok(key) => {
if !self.head.headers.contains_key(&key) {
match value.try_into() {
Ok(value) => {
self.head.headers.insert(key, value);
}
Err(e) => self.err = Some(e.into()),
}
}
}
Err(e) => self.err = Some(e.into()),
}
self
}
/// Set HTTP basic authorization header
pub fn basic_auth<U>(self, username: U, password: Option<&str>) -> Self
where
U: fmt::Display,
{
let auth = match password {
Some(password) => format!("{}:{}", username, password),
None => format!("{}:", username),
};
self.header(AUTHORIZATION, format!("Basic {}", base64::encode(&auth)))
}
/// Set HTTP bearer authentication header
pub fn bearer_auth<T>(self, token: T) -> Self
where
T: fmt::Display,
{
self.header(AUTHORIZATION, format!("Bearer {}", token))
}
/// Complete request construction and connect to a websockets server.
pub async fn connect(
mut self,
) -> Result<(ClientResponse, Framed<BoxedSocket, Codec>), WsClientError> {
if let Some(e) = self.err.take() {
return Err(e.into());
}
// validate uri
let uri = &self.head.uri;
if uri.host().is_none() {
return Err(InvalidUrl::MissingHost.into());
} else if uri.scheme().is_none() {
return Err(InvalidUrl::MissingScheme.into());
} else if let Some(scheme) = uri.scheme() {
match scheme.as_str() {
"http" | "ws" | "https" | "wss" => {}
_ => return Err(InvalidUrl::UnknownScheme.into()),
}
} else {
return Err(InvalidUrl::UnknownScheme.into());
}
if !self.head.headers.contains_key(header::HOST) {
self.head.headers.insert(
header::HOST,
HeaderValue::from_str(uri.host().unwrap()).unwrap(),
);
}
// set cookies
if let Some(ref mut jar) = self.cookies {
let cookie: String = jar
.delta()
// ensure only name=value is written to cookie header
.map(|c| Cookie::new(c.name(), c.value()).encoded().to_string())
.collect::<Vec<_>>()
.join("; ");
if !cookie.is_empty() {
self.head
.headers
.insert(header::COOKIE, HeaderValue::from_str(&cookie).unwrap());
}
}
// origin
if let Some(origin) = self.origin.take() {
self.head.headers.insert(header::ORIGIN, origin);
}
self.head.set_connection_type(ConnectionType::Upgrade);
self.head
.headers
.insert(header::UPGRADE, HeaderValue::from_static("websocket"));
self.head.headers.insert(
header::SEC_WEBSOCKET_VERSION,
HeaderValue::from_static("13"),
);
if let Some(protocols) = self.protocols.take() {
self.head.headers.insert(
header::SEC_WEBSOCKET_PROTOCOL,
HeaderValue::try_from(protocols.as_str()).unwrap(),
);
}
// Generate a random key for the `Sec-WebSocket-Key` header.
// a base64-encoded (see Section 4 of [RFC4648]) value that,
// when decoded, is 16 bytes in length (RFC 6455)
let sec_key: [u8; 16] = rand::random();
let key = base64::encode(&sec_key);
self.head.headers.insert(
header::SEC_WEBSOCKET_KEY,
HeaderValue::try_from(key.as_str()).unwrap(),
);
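// Per RFC 6455 the server must answer with Sec-WebSocket-Accept =
// base64(SHA-1(key ++ "258EAFA5-E914-47DA-95CA-C5AB0DC85B11")); `ws::hash_key`
// further down is assumed to compute exactly that digest for the comparison.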
let head = self.head;
let max_size = self.max_size;
let server_mode = self.server_mode;
let fut = self
.config
.connector
.borrow_mut()
.open_tunnel(head, self.addr);
// set request timeout
let (head, framed) = if let Some(to) = self.config.timeout {
timeout(to, fut)
.await
.map_err(|_| SendRequestError::Timeout)
.and_then(|res| res)?
} else {
fut.await?
};
// verify response
if head.status != StatusCode::SWITCHING_PROTOCOLS {
return Err(WsClientError::InvalidResponseStatus(head.status));
}
// Check for "UPGRADE" to websocket header
let has_hdr = if let Some(hdr) = head.headers.get(&header::UPGRADE) {
if let Ok(s) = hdr.to_str() {
s.to_ascii_lowercase().contains("websocket")
} else {
false
}
} else {
false
};
if !has_hdr {
log::trace!("Invalid upgrade header");
return Err(WsClientError::InvalidUpgradeHeader);
}
// Check for "CONNECTION" header
if let Some(conn) = head.headers.get(&header::CONNECTION) {
if let Ok(s) = conn.to_str() {
if !s.to_ascii_lowercase().contains("upgrade") {
log::trace!("Invalid connection header: {}", s);
return Err(WsClientError::InvalidConnectionHeader(conn.clone()));
}
} else {
log::trace!("Invalid connection header: {:?}", conn);
return Err(WsClientError::InvalidConnectionHeader(conn.clone()));
}
} else {
log::trace!("Missing connection header");
return Err(WsClientError::MissingConnectionHeader);
}
if let Some(hdr_key) = head.headers.get(&header::SEC_WEBSOCKET_ACCEPT) {
let encoded = ws::hash_key(key.as_ref());
if hdr_key.as_bytes() != encoded.as_bytes() {
log::trace!(
"Invalid challenge response: expected: {} received: {:?}",
encoded,
key
);
return Err(WsClientError::InvalidChallengeResponse(
encoded,
hdr_key.clone(),
));
}
} else {
log::trace!("Missing SEC-WEBSOCKET-ACCEPT header");
return Err(WsClientError::MissingWebSocketAcceptHeader);
};
// response and ws framed
Ok((
ClientResponse::new(head, Payload::None),
framed.into_map_codec(|_| {
if server_mode {
ws::Codec::new().max_size(max_size)
} else {
ws::Codec::new().max_size(max_size).client_mode()
}
}),
))
}
}
impl fmt::Debug for WebsocketsRequest {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(
f,
"\nWebsocketsRequest {}:{}",
self.head.method, self.head.uri
)?;
writeln!(f, " headers:")?;
for (key, val) in self.head.headers.iter() {
writeln!(f, " {:?}: {:?}", key, val)?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Client;
#[actix_rt::test]
async fn test_debug() {
let request = Client::new().ws("/").header("x-test", "111");
let repr = format!("{:?}", request);
assert!(repr.contains("WebsocketsRequest"));
assert!(repr.contains("x-test"));
}
#[actix_rt::test]
async fn test_header_override() {
let req = Client::builder()
.header(header::CONTENT_TYPE, "111")
.finish()
.ws("/")
.set_header(header::CONTENT_TYPE, "222");
assert_eq!(
req.head
.headers
.get(header::CONTENT_TYPE)
.unwrap()
.to_str()
.unwrap(),
"222"
);
}
#[actix_rt::test]
async fn basic_auth() {
let req = Client::new()
.ws("/")
.basic_auth("username", Some("password"));
assert_eq!(
req.head
.headers
.get(header::AUTHORIZATION)
.unwrap()
.to_str()
.unwrap(),
"Basic dXNlcm5hbWU6cGFzc3dvcmQ="
);
let req = Client::new().ws("/").basic_auth("username", None);
assert_eq!(
req.head
.headers
.get(header::AUTHORIZATION)
.unwrap()
.to_str()
.unwrap(),
"Basic dXNlcm5hbWU6"
);
}
#[actix_rt::test]
async fn bearer_auth() {
let req = Client::new().ws("/").bearer_auth("someS3cr3tAutht0k3n");
assert_eq!(
req.head
.headers
.get(header::AUTHORIZATION)
.unwrap()
.to_str()
.unwrap(),
"Bearer someS3cr3tAutht0k3n"
);
let _ = req.connect();
}
#[actix_rt::test]
async fn basics() {
let req = Client::new()
.ws("http://localhost/")
.origin("test-origin")
.max_frame_size(100)
.server_mode()
.protocols(&["v1", "v2"])
.set_header_if_none(header::CONTENT_TYPE, "json")
.set_header_if_none(header::CONTENT_TYPE, "text")
.cookie(Cookie::build("cookie1", "value1").finish());
assert_eq!(
req.origin.as_ref().unwrap().to_str().unwrap(),
"test-origin"
);
assert_eq!(req.max_size, 100);
assert_eq!(req.server_mode, true);
assert_eq!(req.protocols, Some("v1,v2".to_string()));
assert_eq!(
req.head.headers.get(header::CONTENT_TYPE).unwrap(),
header::HeaderValue::from_static("json")
);
let _ = req.connect().await;
assert!(Client::new().ws("/").connect().await.is_err());
assert!(Client::new().ws("http:///test").connect().await.is_err());
assert!(Client::new().ws("hmm://test.com/").connect().await.is_err());
}
}
| 30.399247 | 90 | 0.505204 |
2f503f1868309424040a21366204c4ca191f970e | 13,839 | use crate::decimal::{MAX_PRECISION_I32, POWERS_10};
use crate::Decimal;
// The maximum power of 10 that a 32 bit integer can store
pub(super) const MAX_I32_SCALE: i32 = 9;
// The maximum power of 10 that a 64 bit integer can store
pub(super) const MAX_I64_SCALE: u32 = 19;
pub(super) const U32_MAX: u64 = u32::MAX as u64;
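// Worked check: u32::MAX = 4_294_967_295 holds 10^9 but not 10^10, and
// u64::MAX = 18_446_744_073_709_551_615 holds 10^19 but not 10^20, which is
// where the two scale bounds above come from.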
#[derive(Debug)]
pub struct Buf12 {
pub data: [u32; 3],
}
impl Buf12 {
pub(super) const fn from_dec64(value: &Dec64) -> Self {
Buf12 {
data: [value.low64 as u32, (value.low64 >> 32) as u32, value.hi],
}
}
pub(super) const fn from_decimal(value: &Decimal) -> Self {
Buf12 {
data: value.mantissa_array3(),
}
}
#[inline(always)]
pub const fn lo(&self) -> u32 {
self.data[0]
}
#[inline(always)]
pub const fn mid(&self) -> u32 {
self.data[1]
}
#[inline(always)]
pub const fn hi(&self) -> u32 {
self.data[2]
}
#[inline(always)]
pub fn set_lo(&mut self, value: u32) {
self.data[0] = value;
}
#[inline(always)]
pub fn set_mid(&mut self, value: u32) {
self.data[1] = value;
}
#[inline(always)]
pub fn set_hi(&mut self, value: u32) {
self.data[2] = value;
}
#[inline(always)]
pub const fn low64(&self) -> u64 {
((self.data[1] as u64) << 32) | (self.data[0] as u64)
}
#[inline(always)]
pub fn set_low64(&mut self, value: u64) {
self.data[1] = (value >> 32) as u32;
self.data[0] = value as u32;
}
#[inline(always)]
pub const fn high64(&self) -> u64 {
((self.data[2] as u64) << 32) | (self.data[1] as u64)
}
#[inline(always)]
pub fn set_high64(&mut self, value: u64) {
self.data[2] = (value >> 32) as u32;
self.data[1] = value as u32;
}
// Determine the maximum value of x that ensures that the quotient when scaled up by 10^x
// still fits in 96 bits. Ultimately, we want to make scale positive - if we can't then
// we're going to overflow. Because x is ultimately used to lookup inside the POWERS array, it
// must be a valid value 0 <= x <= 9
pub fn find_scale(&self, scale: i32) -> Option<usize> {
const OVERFLOW_MAX_9_HI: u32 = 4;
const OVERFLOW_MAX_8_HI: u32 = 42;
const OVERFLOW_MAX_7_HI: u32 = 429;
const OVERFLOW_MAX_6_HI: u32 = 4294;
const OVERFLOW_MAX_5_HI: u32 = 42949;
const OVERFLOW_MAX_4_HI: u32 = 429496;
const OVERFLOW_MAX_3_HI: u32 = 4294967;
const OVERFLOW_MAX_2_HI: u32 = 42949672;
const OVERFLOW_MAX_1_HI: u32 = 429496729;
const OVERFLOW_MAX_9_LOW64: u64 = 5441186219426131129;
let hi = self.data[2];
let low64 = self.low64();
let mut x = 0usize;
// Quick check to stop us from trying to scale any more.
//
if hi > OVERFLOW_MAX_1_HI {
// If it's less than 0, which it probably is - overflow. We can't do anything.
if scale < 0 {
return None;
}
return Some(x);
}
if scale > MAX_PRECISION_I32 - 9 {
// We can't scale by 10^9 without exceeding the max scale factor.
// Instead, we'll try to scale by the most that we can and see if that works.
// This is safe to do due to the check above. e.g. scale > 19 in the above, so it will
// evaluate to 9 or less below.
x = (MAX_PRECISION_I32 - scale) as usize;
if hi < POWER_OVERFLOW_VALUES[x - 1].data[2] {
if x as i32 + scale < 0 {
// We still overflow
return None;
}
return Some(x);
}
} else if hi < OVERFLOW_MAX_9_HI || (hi == OVERFLOW_MAX_9_HI && low64 <= OVERFLOW_MAX_9_LOW64) {
return Some(9);
}
// Do a binary search to find a power to scale by that is less than 9
x = if hi > OVERFLOW_MAX_5_HI {
if hi > OVERFLOW_MAX_3_HI {
if hi > OVERFLOW_MAX_2_HI {
1
} else {
2
}
} else if hi > OVERFLOW_MAX_4_HI {
3
} else {
4
}
} else if hi > OVERFLOW_MAX_7_HI {
if hi > OVERFLOW_MAX_6_HI {
5
} else {
6
}
} else if hi > OVERFLOW_MAX_8_HI {
7
} else {
8
};
// Double check what we've found won't overflow. Otherwise, we go one below.
if hi == POWER_OVERFLOW_VALUES[x - 1].data[2] && low64 > POWER_OVERFLOW_VALUES[x - 1].low64() {
x -= 1;
}
// Confirm we've actually resolved things
if x as i32 + scale < 0 {
None
} else {
Some(x)
}
}
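// Example reading of the above (grounded in the constants): with hi == 0 and
// scale <= MAX_PRECISION_I32 - 9 the quotient can absorb the full 10^9, so the
// result is Some(9); with hi > OVERFLOW_MAX_1_HI no scaling fits, giving
// Some(0) for non-negative scales and None once scale is negative.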
}
// This is a table of the largest values that will not overflow when multiplied
// by a given power as represented by the index.
static POWER_OVERFLOW_VALUES: [Buf12; 8] = [
Buf12 {
data: [2576980377, 2576980377, 429496729],
},
Buf12 {
data: [687194767, 4123168604, 42949672],
},
Buf12 {
data: [2645699854, 1271310319, 4294967],
},
Buf12 {
data: [694066715, 3133608139, 429496],
},
Buf12 {
data: [2216890319, 2890341191, 42949],
},
Buf12 {
data: [2369172679, 4154504685, 4294],
},
Buf12 {
data: [4102387834, 2133437386, 429],
},
Buf12 {
data: [410238783, 4078814305, 42],
},
];
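// Worked check (illustration): entry 0 is floor((2^96 - 1) / 10) =
// 0x19999999_99999999_99999999, stored little-endian as
// [0x99999999, 0x99999999, 0x19999999] = [2576980377, 2576980377, 429496729];
// each later entry drops a further factor of 10.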
pub(super) struct Dec64 {
pub negative: bool,
pub scale: u32,
pub hi: u32,
pub low64: u64,
}
impl Dec64 {
pub(super) const fn new(d: &Decimal) -> Dec64 {
let m = d.mantissa_array3();
if m[1] == 0 {
Dec64 {
negative: d.is_sign_negative(),
scale: d.scale(),
hi: m[2],
low64: m[0] as u64,
}
} else {
Dec64 {
negative: d.is_sign_negative(),
scale: d.scale(),
hi: m[2],
low64: ((m[1] as u64) << 32) | (m[0] as u64),
}
}
}
#[inline(always)]
pub(super) const fn lo(&self) -> u32 {
self.low64 as u32
}
#[inline(always)]
pub(super) const fn mid(&self) -> u32 {
(self.low64 >> 32) as u32
}
#[inline(always)]
pub(super) const fn high64(&self) -> u64 {
(self.low64 >> 32) | ((self.hi as u64) << 32)
}
pub(super) const fn to_decimal(&self) -> Decimal {
Decimal::from_parts(
self.low64 as u32,
(self.low64 >> 32) as u32,
self.hi,
self.negative,
self.scale,
)
}
}
pub struct Buf16 {
pub data: [u32; 4],
}
impl Buf16 {
pub const fn zero() -> Self {
Buf16 { data: [0, 0, 0, 0] }
}
pub const fn low64(&self) -> u64 {
((self.data[1] as u64) << 32) | (self.data[0] as u64)
}
pub fn set_low64(&mut self, value: u64) {
self.data[1] = (value >> 32) as u32;
self.data[0] = value as u32;
}
pub const fn mid64(&self) -> u64 {
((self.data[2] as u64) << 32) | (self.data[1] as u64)
}
pub fn set_mid64(&mut self, value: u64) {
self.data[2] = (value >> 32) as u32;
self.data[1] = value as u32;
}
pub const fn high64(&self) -> u64 {
((self.data[3] as u64) << 32) | (self.data[2] as u64)
}
pub fn set_high64(&mut self, value: u64) {
self.data[3] = (value >> 32) as u32;
self.data[2] = value as u32;
}
}
#[derive(Debug)]
pub struct Buf24 {
pub data: [u32; 6],
}
impl Buf24 {
pub const fn zero() -> Self {
Buf24 {
data: [0, 0, 0, 0, 0, 0],
}
}
pub const fn low64(&self) -> u64 {
((self.data[1] as u64) << 32) | (self.data[0] as u64)
}
pub fn set_low64(&mut self, value: u64) {
self.data[1] = (value >> 32) as u32;
self.data[0] = value as u32;
}
#[allow(dead_code)]
pub const fn mid64(&self) -> u64 {
((self.data[3] as u64) << 32) | (self.data[2] as u64)
}
pub fn set_mid64(&mut self, value: u64) {
self.data[3] = (value >> 32) as u32;
self.data[2] = value as u32;
}
#[allow(dead_code)]
pub const fn high64(&self) -> u64 {
((self.data[5] as u64) << 32) | (self.data[4] as u64)
}
pub fn set_high64(&mut self, value: u64) {
self.data[5] = (value >> 32) as u32;
self.data[4] = value as u32;
}
pub const fn upper_word(&self) -> usize {
if self.data[5] > 0 {
return 5;
}
if self.data[4] > 0 {
return 4;
}
if self.data[3] > 0 {
return 3;
}
if self.data[2] > 0 {
return 2;
}
if self.data[1] > 0 {
return 1;
}
0
}
// Attempt to rescale the number into 96 bits. If successful, the scale is returned wrapped
// in an Option. If it failed due to overflow, we return None.
// * `upper` - Index of last non-zero value in self.
// * `scale` - Current scale factor for this value.
pub fn rescale(&mut self, upper: usize, scale: u32) -> Option<u32> {
let mut scale = scale as i32;
let mut upper = upper;
// Determine a rescale target to start with
let mut rescale_target = 0i32;
if upper > 2 {
// Estimate the number of decimal digits sitting above 96 bits: count the
// significant bits past the third word and convert bits to digits with
// 77/256 ~= log10(2), then round up.
rescale_target = upper as i32 * 32 - 64 - 1;
rescale_target -= self.data[upper].leading_zeros() as i32;
rescale_target = ((rescale_target * 77) >> 8) + 1;
if rescale_target > scale {
return None;
}
}
// Make sure we scale enough to bring it into a valid range
if rescale_target < scale - MAX_PRECISION_I32 {
rescale_target = scale - MAX_PRECISION_I32;
}
if rescale_target > 0 {
// We're going to keep reducing by powers of 10. So, start by reducing the scale by
// that amount.
scale -= rescale_target;
let mut sticky = 0;
let mut remainder = 0;
loop {
sticky |= remainder;
let mut power = if rescale_target > 8 {
POWERS_10[9]
} else {
POWERS_10[rescale_target as usize]
};
let high = self.data[upper];
let high_quotient = high / power;
remainder = high - high_quotient * power;
for item in self.data.iter_mut().rev().skip(6 - upper) {
let num = (*item as u64).wrapping_add((remainder as u64) << 32);
*item = (num / power as u64) as u32;
remainder = (num as u32).wrapping_sub(item.wrapping_mul(power));
}
self.data[upper] = high_quotient;
// If the high quotient was zero then decrease the upper bound
if high_quotient == 0 && upper > 0 {
upper -= 1;
}
if rescale_target > MAX_I32_SCALE {
// Scale some more
rescale_target -= MAX_I32_SCALE;
continue;
}
// If we fit into 96 bits then we've scaled enough. Otherwise, scale once more.
if upper > 2 {
if scale == 0 {
return None;
}
// Equivalent to scaling down by 10
rescale_target = 1;
scale -= 1;
continue;
}
// Round the final result.
power >>= 1;
let carried = if power <= remainder {
// If we're less than half then we're fine. Otherwise, we round if odd or if the
// sticky bit is set.
if power < remainder || ((self.data[0] & 1) | sticky) != 0 {
// Round up
self.data[0] = self.data[0].wrapping_add(1);
// Check if we carried
self.data[0] == 0
} else {
false
}
} else {
false
};
// If we carried then propagate through the portions
if carried {
let mut pos = 0;
for (index, value) in self.data.iter_mut().enumerate().skip(1) {
pos = index;
*value = value.wrapping_add(1);
if *value != 0 {
break;
}
}
// If we ended up rounding over the 96 bits then we'll try to rescale down (again)
if pos > 2 {
                    // No scale left to shed, so the carry would overflow 96 bits
if scale == 0 {
return None;
}
// Loop back around using scale of 10.
// Reset the sticky bit and remainder before looping.
upper = pos;
sticky = 0;
remainder = 0;
rescale_target = 1;
scale -= 1;
continue;
}
}
break;
}
}
Some(scale as u32)
}
}
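
// A minimal usage sketch of `rescale` (illustrative values; not part of the
// original API surface). Dividing out powers of ten trades scale for
// magnitude until the value fits back into `data[0..=2]`:
//
//     let mut buf = Buf24::zero();
//     buf.set_low64(u64::MAX);
//     buf.set_mid64(u64::MAX); // 128 bits of payload, so upper_word() == 3
//     let upper = buf.upper_word();
//     // Succeeds only if the current scale leaves enough powers of ten to
//     // shed the excess bits; otherwise `None` signals overflow.
//     match buf.rescale(upper, 20) {
//         Some(new_scale) => { /* value now fits in 96 bits */ let _ = new_scale; }
//         None => { /* overflow */ }
//     }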
| 29.954545 | 103 | 0.476624 |
fc5e64e06828e73f138cdbd7baf8febd63734cc1 | 73 | collection_ve_map.WILDCARD.SafeVarargs
collection_ve_map.WILDCARD.elma
| 24.333333 | 39 | 0.890411 |
e87d84ab0d225ed104b084004e68bb03681adad7 | 5,720 | // Generated from definition io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceCondition
#[derive(Clone, Debug, Default, PartialEq)]
pub struct APIServiceCondition {
/// Last time the condition transitioned from one status to another.
pub last_transition_time: Option<crate::v1_10::apimachinery::pkg::apis::meta::v1::Time>,
/// Human-readable message indicating details about last transition.
pub message: Option<String>,
/// Unique, one-word, CamelCase reason for the condition's last transition.
pub reason: Option<String>,
/// Status is the status of the condition. Can be True, False, Unknown.
pub status: String,
/// Type is the type of the condition.
pub type_: String,
}
impl<'de> serde::Deserialize<'de> for APIServiceCondition {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_last_transition_time,
Key_message,
Key_reason,
Key_status,
Key_type_,
Other,
}
impl<'de> serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
Ok(match v {
"lastTransitionTime" => Field::Key_last_transition_time,
"message" => Field::Key_message,
"reason" => Field::Key_reason,
"status" => Field::Key_status,
"type" => Field::Key_type_,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = APIServiceCondition;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "struct APIServiceCondition")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
let mut value_last_transition_time: Option<crate::v1_10::apimachinery::pkg::apis::meta::v1::Time> = None;
let mut value_message: Option<String> = None;
let mut value_reason: Option<String> = None;
let mut value_status: Option<String> = None;
let mut value_type_: Option<String> = None;
while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_last_transition_time => value_last_transition_time = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_message => value_message = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_reason => value_reason = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_status => value_status = Some(serde::de::MapAccess::next_value(&mut map)?),
Field::Key_type_ => value_type_ = Some(serde::de::MapAccess::next_value(&mut map)?),
Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(APIServiceCondition {
last_transition_time: value_last_transition_time,
message: value_message,
reason: value_reason,
status: value_status.ok_or_else(|| serde::de::Error::missing_field("status"))?,
type_: value_type_.ok_or_else(|| serde::de::Error::missing_field("type"))?,
})
}
}
deserializer.deserialize_struct(
"APIServiceCondition",
&[
"lastTransitionTime",
"message",
"reason",
"status",
"type",
],
Visitor,
)
}
}
impl serde::Serialize for APIServiceCondition {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
let mut state = serializer.serialize_struct(
"APIServiceCondition",
2 +
self.last_transition_time.as_ref().map_or(0, |_| 1) +
self.message.as_ref().map_or(0, |_| 1) +
self.reason.as_ref().map_or(0, |_| 1),
)?;
if let Some(value) = &self.last_transition_time {
serde::ser::SerializeStruct::serialize_field(&mut state, "lastTransitionTime", value)?;
}
if let Some(value) = &self.message {
serde::ser::SerializeStruct::serialize_field(&mut state, "message", value)?;
}
if let Some(value) = &self.reason {
serde::ser::SerializeStruct::serialize_field(&mut state, "reason", value)?;
}
serde::ser::SerializeStruct::serialize_field(&mut state, "status", &self.status)?;
serde::ser::SerializeStruct::serialize_field(&mut state, "type", &self.type_)?;
serde::ser::SerializeStruct::end(state)
}
}
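
// A minimal round-trip sketch (assumes `serde_json` is available; it is not a
// dependency of this generated file):
//
//     let condition: APIServiceCondition = serde_json::from_str(
//         r#"{"status": "True", "type": "Available", "reason": "Passed"}"#,
//     ).unwrap();
//     assert_eq!(condition.status, "True");
//     assert_eq!(condition.type_, "Available");
//     // Optional fields that are `None` are skipped entirely on serialization.
//     let json = serde_json::to_string(&condition).unwrap();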
| 42.686567 | 132 | 0.541434 |
e863456a6483b02c15d03625fb538e92ba31fdb7 | 9,515 | use makepad_render::*;
use makepad_widget::*;
#[derive(Clone)]
pub struct HomePage {
pub view: ScrollView,
pub shadow: ScrollShadow,
pub text: DrawText,
pub bg: DrawColor,
pub email_input: TextInput,
pub email_state: EmailState,
pub email_signal: Signal,
pub example_texts: ElementsCounted<TextInput>,
pub send_mail_button: NormalButton,
}
#[derive(Clone)]
pub enum EmailState {
Empty,
Invalid,
Valid,
Sending,
ErrorSending,
OkSending
}
impl HomePage {
pub fn new(cx: &mut Cx) -> Self {
Self {
view: ScrollView::new_standard_hv(cx),
bg: DrawColor::new(cx, default_shader!()),
text: DrawText::new(cx, default_shader!()),
shadow: ScrollShadow::new(cx),
send_mail_button: NormalButton::new(cx),
email_signal: cx.new_signal(),
email_input: TextInput::new(cx, TextInputOptions {
multiline: false,
read_only: false,
empty_message: "Enter email".to_string()
}),
email_state: EmailState::Empty,
example_texts: ElementsCounted::new(
TextInput::new(cx, TextInputOptions {
multiline: true,
read_only: true,
empty_message: "".to_string()
})
),
}
}
pub fn style(cx: &mut Cx) {
live_body!(cx, {
self::color_bg: #2;
self::text_style_heading: TextStyle {
font_size: 28.0,
line_spacing: 2.0,
..makepad_widget::widgetstyle::text_style_normal
}
self::text_style_body: TextStyle {
font_size: 10.0,
height_factor: 2.0,
line_spacing: 3.0,
..makepad_widget::widgetstyle::text_style_normal
}
self::text_style_point: TextStyle {
font_size: 8.0,
line_spacing: 2.5,
..makepad_widget::widgetstyle::text_style_normal
}
self::text_color: #b;
self::layout_main: Layout {
padding: {l: 10., t: 10., r: 10., b: 10.},
new_line_padding: 15.,
line_wrap: MaxSize(550.),
}
})
}
pub fn handle_home_page(&mut self, cx: &mut Cx, event: &mut Event) {
if let Event::Signal(sig) = event {
if let Some(statusses) = sig.signals.get(&self.email_signal) {
for status in statusses {
if *status == Cx::status_http_send_ok() {
self.email_state = EmailState::OkSending;
}
else if *status == Cx::status_http_send_fail() {
self.email_state = EmailState::ErrorSending;
}
self.view.redraw_view(cx);
}
}
}
if let TextEditorEvent::Change = self.email_input.handle_text_input(cx, event) {
let email = self.email_input.get_value();
            if !email.is_empty() && !email.contains('@') {
                self.email_state = EmailState::Invalid
            }
            else if !email.is_empty() {
self.email_state = EmailState::Valid
}
else {
self.email_state = EmailState::Empty
}
self.view.redraw_view(cx);
}
if let ButtonEvent::Clicked = self.send_mail_button.handle_normal_button(cx, event) {
match self.email_state {
EmailState::Valid | EmailState::ErrorSending => {
self.email_state = EmailState::Sending;
let email = self.email_input.get_value();
cx.http_send("POST", "/subscribe", "http", "makepad.nl", 80, "text/plain", email.as_bytes(), self.email_signal);
self.view.redraw_view(cx);
},
_ => ()
}
}
for text_input in self.example_texts.iter() {
text_input.handle_text_input(cx, event);
}
self.view.handle_scroll_view(cx, event);
}
pub fn draw_home_page(&mut self, cx: &mut Cx) {
if self.view.begin_view(cx, live_layout!(cx, self::layout_main)).is_err() {return};
self.bg.color = live_vec4!(cx, self::color_bg);
self.bg.draw_quad_rel(cx, cx.get_turtle_rect());//let inst = self.bg.begin_quad_fill(cx);
self.bg.area().set_do_scroll(cx, false, false);
let t = &mut self.text;
t.color = live_vec4!(cx, self::text_color);
t.text_style = live_text_style!(cx, self::text_style_heading);
t.draw_text_walk(cx, "Introducing Makepad\n");
t.text_style = live_text_style!(cx, self::text_style_body);
t.draw_text_walk(cx, "\
Makepad is a new VR, web and native collaborative shader programming environment. \
        It will support many different shader modes, including vertex shaders, \
        besides the well-known Shadertoy-style SDF programs. This makes shader coding possible \
        for more compute-constrained environments like VR goggles or mobiles. \
        Try Makepad now on a Quest in the Quest browser: click the goggles at the top right of the UI. Try touching the leaves of the tree with your hands! Magic!\n");
self.email_input.draw_text_input(cx);
self.send_mail_button.draw_normal_button(cx, match self.email_state {
EmailState::Empty => "Sign up for our newsletter here.",
            EmailState::Invalid => "Email address invalid",
            EmailState::Valid => "Click here to subscribe to our newsletter",
            EmailState::Sending => "Submitting your email address...",
            EmailState::ErrorSending => "Could not send your email address, please retry!",
EmailState::OkSending => "Thank you, we'll keep you informed!"
});
cx.turtle_new_line();
t.draw_text_walk(cx, "\
        The Makepad development platform and library ecosystem are MIT licensed. \
        For the Quest, and in the future iOS, we will provide paid native versions. \
\n");
t.draw_text_walk(cx, "\
        We are still building the collaborative backend, so for now you can simply play with the shader code.\
\n");
t.text_style = live_text_style!(cx, self::text_style_heading);
t.draw_text_walk(cx, "How to install the native version\n");
t.text_style = live_text_style!(cx, self::text_style_body);
t.draw_text_walk(cx, "\
        On all platforms, first install Rust. \
        On Windows, feel free to ignore the warnings about MSVC; Makepad uses the GNU chain. \
        Copy this URL to your favorite browser.\n");
self.example_texts.get_draw(cx).draw_text_input_static(cx, "\
https://www.rust-lang.org/tools/install");
cx.turtle_new_line();
t.text_style = live_text_style!(cx, self::text_style_heading);
t.draw_text_walk(cx, "MacOS\n");
self.example_texts.get_draw(cx).draw_text_input_static(cx, "\
git clone https://github.com/makepad/makepad\n\
cd makepad\n\
tools/macos_rustup.sh\n\
cargo run -p makepad --release");
cx.turtle_new_line();
t.text_style = live_text_style!(cx, self::text_style_heading);
t.draw_text_walk(cx, "Windows\n");
self.example_texts.get_draw(cx).draw_text_input_static(cx, "\
        Clone this repo using either GitHub Desktop or the command line: https://github.com/makepad/makepad\n\
        Open a cmd.exe in the directory you just cloned. GitHub Desktop clones to: Documents\\Github\\makepad\n\
tools\\windows_rustup.bat\n\
cargo run -p makepad --release --target x86_64-pc-windows-gnu");
cx.turtle_new_line();
t.text_style = live_text_style!(cx, self::text_style_heading);
t.draw_text_walk(cx, "Linux\n");
self.example_texts.get_draw(cx).draw_text_input_static(cx, "\
git clone https://github.com/makepad/makepad\n\
cd makepad\n\
tools/linux_rustup.sh\n\
cargo run -p makepad --release");
cx.turtle_new_line();
t.text_style = live_text_style!(cx, self::text_style_heading);
t.draw_text_walk(cx, "Troubleshooting\n");
self.example_texts.get_draw(cx).draw_text_input_static(cx, "\
Delete old settings unix: rm *.ron\n\
Delete old settings windows: del *.ron\n\
Make sure you are on master: git checkout master\n\
Update rust: rustup update\n\
Make sure you have wasm: rustup target add wasm32-unknown-unknown\n\
Pull the latest: git pull\n\
If gnu chain for some reason doesn't work on windows, use the msvc chain\n\
Still have a problem? Report here: https://github.com/makepad/makepad/issues");
cx.turtle_new_line();
self.shadow.draw_shadow_top(cx);
//self.bg.end_quad_fill(cx, inst);
self.view.end_view(cx);
}
}
| 39.318182 | 164 | 0.561745 |
4b7275a23d9121e66774246d51c53f60236a9608 | 3,304 | //! ErgoTree
use crate::{
ast::{Constant, Expr},
types::SType,
};
use sigma_ser::serializer::SerializationError;
use sigma_ser::serializer::SigmaSerializable;
use sigma_ser::vlq_encode;
use std::io;
use std::rc::Rc;
use vlq_encode::{ReadSigmaVlqExt, WriteSigmaVlqExt};
/** The root of ErgoScript IR. Serialized instances of this type are self-sufficient and can be passed around.
*/
#[derive(PartialEq, Debug)]
#[allow(dead_code)]
pub struct ErgoTree {
header: ErgoTreeHeader,
constants: Vec<Constant>,
root: Rc<Expr>,
}
#[derive(PartialEq, Debug)]
struct ErgoTreeHeader(u8);
impl ErgoTree {
const DEFAULT_HEADER: ErgoTreeHeader = ErgoTreeHeader(0);
/// get Expr out of ErgoTree
pub fn proposition(&self) -> Rc<Expr> {
self.root.clone()
}
/// build ErgoTree from an Expr
pub fn from_proposition(expr: Rc<Expr>) -> ErgoTree {
match &*expr {
Expr::Const(c) if c.tpe == SType::SSigmaProp => ErgoTree {
header: ErgoTree::DEFAULT_HEADER,
constants: Vec::new(),
root: expr.clone(),
},
_ => panic!("not yet supported"),
}
}
}
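
// Minimal usage sketch (assumes some `c: Constant` with `tpe == SType::SSigmaProp`;
// building such a constant is out of scope here):
//
//     let tree = ErgoTree::from_proposition(Rc::new(Expr::Const(c)));
//     let root: Rc<Expr> = tree.proposition();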
impl SigmaSerializable for ErgoTreeHeader {
fn sigma_serialize<W: WriteSigmaVlqExt>(&self, w: &mut W) -> Result<(), io::Error> {
w.put_u8(self.0)?;
Ok(())
}
fn sigma_parse<R: ReadSigmaVlqExt>(r: &mut R) -> Result<Self, SerializationError> {
let header = r.get_u8()?;
Ok(ErgoTreeHeader(header))
}
}
impl SigmaSerializable for ErgoTree {
fn sigma_serialize<W: WriteSigmaVlqExt>(&self, w: &mut W) -> Result<(), io::Error> {
self.header.sigma_serialize(w)?;
w.put_usize_as_u32(self.constants.len())?;
assert!(
self.constants.is_empty(),
"separate constants serialization is not yet supported"
);
self.root.sigma_serialize(w)?;
Ok(())
}
fn sigma_parse<R: ReadSigmaVlqExt>(r: &mut R) -> Result<Self, SerializationError> {
let header = ErgoTreeHeader::sigma_parse(r)?;
let constants_len = r.get_u32()?;
assert!(
constants_len == 0,
"separate constants serialization is not yet supported"
);
let constants = Vec::new();
let root = Expr::sigma_parse(r)?;
Ok(ErgoTree {
header,
constants,
root: Rc::new(root),
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{ast::ConstantVal, data::SigmaProp};
use proptest::prelude::*;
use sigma_ser::test_helpers::*;
impl Arbitrary for ErgoTree {
type Parameters = ();
type Strategy = BoxedStrategy<Self>;
fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
(any::<SigmaProp>())
.prop_map(|p| {
ErgoTree::from_proposition(Rc::new(Expr::Const(Constant {
tpe: SType::SSigmaProp,
v: ConstantVal::SigmaProp(Box::new(p)),
})))
})
.boxed()
}
}
proptest! {
#[test]
fn ser_roundtrip(v in any::<ErgoTree>()) {
prop_assert_eq![sigma_serialize_roundtrip(&(v)), v];
}
}
}
| 28 | 111 | 0.568099 |
61b58bf1b5999492715d0c3872794acae9d69808 | 54,484 | // This file is part of Substrate.
// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! # BABE (Blind Assignment for Blockchain Extension)
//!
//! BABE is a slot-based block production mechanism which uses a VRF PRNG to
//! randomly perform the slot allocation. On every slot, all the authorities
//! generate a new random number with the VRF function and if it is lower than a
//! given threshold (which is proportional to their weight/stake) they have a
//! right to produce a block. The proof of the VRF function execution will be
//! used by other peers to validate the legitimacy of the slot claim.
//!
//! The engine is also responsible for collecting entropy on-chain which will be
//! used to seed the given VRF PRNG. An epoch is a contiguous number of slots
//! under which we will be using the same authority set. During an epoch all VRF
//! outputs produced as a result of block production will be collected on an
//! on-chain randomness pool. Epoch changes are announced one epoch in advance,
//! i.e. when ending epoch N, we announce the parameters (randomness,
//! authorities, etc.) for epoch N+2.
//!
//! Since the slot assignment is randomized, it is possible that a slot is
//! assigned to multiple validators in which case we will have a temporary fork,
//! or that a slot is assigned to no validator in which case no block is
//! produced. Which means that block times are not deterministic.
//!
//! The protocol has a parameter `c` in [0, 1] for which `1 - c` is the probability
//! of a slot being empty. The choice of this parameter affects the security of
//! the protocol relating to maximum tolerable network delays.
//!
//! In addition to the VRF-based slot assignment described above, which we will
//! call primary slots, the engine also supports a deterministic secondary slot
//! assignment. Primary slots take precedence over secondary slots, when
//! authoring the node starts by trying to claim a primary slot and falls back
//! to a secondary slot claim attempt. The secondary slot assignment is done
//! by picking the authority at index:
//!
//! `blake2_256(epoch_randomness ++ slot_number) % authorities_len`.
//!
//! Secondary slots support either a `SecondaryPlain` or `SecondaryVRF`
//! variant. Compared with the `SecondaryPlain` variant, the `SecondaryVRF`
//! variant generates an additional VRF output. The output is not included in
//! beacon randomness, but can be consumed by parachains.
//!
//! The fork choice rule is weight-based, where weight equals the number of
//! primary blocks in the chain. We will pick the heaviest chain (more primary
//! blocks) and will go with the longest one in case of a tie.
//!
//! An in-depth description and analysis of the protocol can be found here:
//! <https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html>
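//!
//! As a rough sketch of the secondary index computation above (illustrative
//! only; the actual claim logic lives in the `authorship` module):
//!
//! ```ignore
//! let rand = U256::from((epoch_randomness, slot).using_encoded(blake2_256));
//! let authority_index = rand % U256::from(authorities.len());
//! ```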
#![forbid(unsafe_code)]
#![warn(missing_docs)]
use std::{
borrow::Cow, collections::HashMap, convert::TryInto, pin::Pin, sync::Arc, time::Duration, u64,
};
use codec::{Decode, Encode};
use futures::channel::mpsc::{channel, Receiver, Sender};
use futures::channel::oneshot;
use futures::prelude::*;
use log::{debug, info, log, trace, warn};
use parking_lot::Mutex;
use prometheus_endpoint::Registry;
use retain_mut::RetainMut;
use schnorrkel::SignatureError;
use sc_client_api::{backend::AuxStore, BlockchainEvents, ProvideUncles, UsageProvider};
use sc_consensus_epochs::{
descendent_query, Epoch as EpochT, EpochChangesFor, SharedEpochChanges, ViableEpochDescriptor,
};
use sc_consensus_slots::{
check_equivocation, BackoffAuthoringBlocksStrategy, CheckedHeader, InherentDataProviderExt,
SlotInfo, StorageChanges,
};
use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE};
use sp_api::ApiExt;
use sp_api::{NumberFor, ProvideRuntimeApi};
use sp_application_crypto::AppKey;
use sp_block_builder::BlockBuilder as BlockBuilderApi;
use sp_blockchain::{
Error as ClientError, HeaderBackend, HeaderMetadata, ProvideCache, Result as ClientResult,
};
use sp_consensus::{import_queue::BoxJustificationImport, CanAuthorWith, ImportResult};
use sp_consensus::{
import_queue::{BasicQueue, CacheKeyId, DefaultImportQueue, Verifier},
BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Environment,
Error as ConsensusError, ForkChoiceStrategy, Proposer, SelectChain, SlotData,
StateAction,
};
use sp_consensus_babe::inherents::BabeInherentData;
use sp_consensus_slots::Slot;
use sp_core::crypto::Public;
use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider};
use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
use sp_runtime::{
generic::{BlockId, OpaqueDigestItemId},
traits::{Block as BlockT, DigestItemFor, Header, Zero},
Justifications,
};
pub use sc_consensus_slots::SlotProportion;
pub use sp_consensus::SyncOracle;
pub use sp_consensus_babe::{
digests::{
CompatibleDigestItem, NextConfigDescriptor, NextEpochDescriptor, PreDigest,
PrimaryPreDigest, SecondaryPlainPreDigest,
},
AuthorityId, AuthorityPair, AuthoritySignature, BabeApi, BabeAuthorityWeight,
BabeEpochConfiguration, BabeGenesisConfiguration, ConsensusLog, BABE_ENGINE_ID,
VRF_OUTPUT_LENGTH,
};
pub use aux_schema::load_block_weight as block_weight;
mod migration;
mod verification;
pub mod authorship;
pub mod aux_schema;
#[cfg(test)]
mod tests;
/// BABE epoch information
#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)]
pub struct Epoch {
/// The epoch index.
pub epoch_index: u64,
/// The starting slot of the epoch.
pub start_slot: Slot,
/// The duration of this epoch.
pub duration: u64,
/// The authorities and their weights.
pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>,
/// Randomness for this epoch.
pub randomness: [u8; VRF_OUTPUT_LENGTH],
/// Configuration of the epoch.
pub config: BabeEpochConfiguration,
}
impl EpochT for Epoch {
type NextEpochDescriptor = (NextEpochDescriptor, BabeEpochConfiguration);
type Slot = Slot;
fn increment(
&self,
(descriptor, config): (NextEpochDescriptor, BabeEpochConfiguration)
) -> Epoch {
Epoch {
epoch_index: self.epoch_index + 1,
start_slot: self.start_slot + self.duration,
duration: self.duration,
authorities: descriptor.authorities,
randomness: descriptor.randomness,
config,
}
}
fn start_slot(&self) -> Slot {
self.start_slot
}
fn end_slot(&self) -> Slot {
self.start_slot + self.duration
}
}
impl Epoch {
/// Create the genesis epoch (epoch #0). This is defined to start at the slot of
/// the first block, so that has to be provided.
pub fn genesis(
genesis_config: &BabeGenesisConfiguration,
slot: Slot,
) -> Epoch {
Epoch {
epoch_index: 0,
start_slot: slot,
duration: genesis_config.epoch_length,
authorities: genesis_config.genesis_authorities.clone(),
randomness: genesis_config.randomness,
config: BabeEpochConfiguration {
c: genesis_config.c,
allowed_slots: genesis_config.allowed_slots,
},
}
}
}
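
// A worked example of the epoch arithmetic above (illustrative values only):
// an `Epoch { epoch_index: 0, start_slot: 100, duration: 200, .. }` covers
// slots `[100, 300)`; `increment` then yields epoch 1 covering `[300, 500)`,
// keeping the duration and taking authorities, randomness and config from the
// announced next-epoch descriptor.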
/// Errors encountered by the babe authorship task.
#[derive(derive_more::Display, Debug)]
pub enum Error<B: BlockT> {
/// Multiple BABE pre-runtime digests
#[display(fmt = "Multiple BABE pre-runtime digests, rejecting!")]
MultiplePreRuntimeDigests,
/// No BABE pre-runtime digest found
#[display(fmt = "No BABE pre-runtime digest found")]
NoPreRuntimeDigest,
/// Multiple BABE epoch change digests
#[display(fmt = "Multiple BABE epoch change digests, rejecting!")]
MultipleEpochChangeDigests,
/// Multiple BABE config change digests
#[display(fmt = "Multiple BABE config change digests, rejecting!")]
MultipleConfigChangeDigests,
/// Could not extract timestamp and slot
#[display(fmt = "Could not extract timestamp and slot: {:?}", _0)]
Extraction(sp_consensus::Error),
/// Could not fetch epoch
#[display(fmt = "Could not fetch epoch at {:?}", _0)]
FetchEpoch(B::Hash),
/// Header rejected: too far in the future
#[display(fmt = "Header {:?} rejected: too far in the future", _0)]
TooFarInFuture(B::Hash),
/// Parent unavailable. Cannot import
#[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)]
ParentUnavailable(B::Hash, B::Hash),
/// Slot number must increase
#[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)]
SlotMustIncrease(Slot, Slot),
/// Header has a bad seal
#[display(fmt = "Header {:?} has a bad seal", _0)]
HeaderBadSeal(B::Hash),
/// Header is unsealed
#[display(fmt = "Header {:?} is unsealed", _0)]
HeaderUnsealed(B::Hash),
/// Slot author not found
#[display(fmt = "Slot author not found")]
SlotAuthorNotFound,
/// Secondary slot assignments are disabled for the current epoch.
#[display(fmt = "Secondary slot assignments are disabled for the current epoch.")]
SecondarySlotAssignmentsDisabled,
/// Bad signature
#[display(fmt = "Bad signature on {:?}", _0)]
BadSignature(B::Hash),
/// Invalid author: Expected secondary author
#[display(fmt = "Invalid author: Expected secondary author: {:?}, got: {:?}.", _0, _1)]
InvalidAuthor(AuthorityId, AuthorityId),
/// No secondary author expected.
#[display(fmt = "No secondary author expected.")]
NoSecondaryAuthorExpected,
/// VRF verification of block by author failed
#[display(fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", _0, _1)]
VRFVerificationOfBlockFailed(AuthorityId, u128),
/// VRF verification failed
#[display(fmt = "VRF verification failed: {:?}", _0)]
VRFVerificationFailed(SignatureError),
/// Could not fetch parent header
#[display(fmt = "Could not fetch parent header: {:?}", _0)]
FetchParentHeader(sp_blockchain::Error),
/// Expected epoch change to happen.
#[display(fmt = "Expected epoch change to happen at {:?}, s{}", _0, _1)]
ExpectedEpochChange(B::Hash, Slot),
/// Unexpected config change.
#[display(fmt = "Unexpected config change")]
UnexpectedConfigChange,
/// Unexpected epoch change
#[display(fmt = "Unexpected epoch change")]
UnexpectedEpochChange,
/// Parent block has no associated weight
#[display(fmt = "Parent block of {} has no associated weight", _0)]
ParentBlockNoAssociatedWeight(B::Hash),
/// Check inherents error
#[display(fmt = "Checking inherents failed: {}", _0)]
CheckInherents(sp_inherents::Error),
/// Unhandled check inherents error
#[display(fmt = "Checking inherents unhandled error: {}", "String::from_utf8_lossy(_0)")]
CheckInherentsUnhandled(sp_inherents::InherentIdentifier),
/// Create inherents error.
#[display(fmt = "Creating inherents failed: {}", _0)]
CreateInherents(sp_inherents::Error),
/// Client error
Client(sp_blockchain::Error),
/// Runtime Api error.
RuntimeApi(sp_api::ApiError),
/// Fork tree error
ForkTree(Box<fork_tree::Error<sp_blockchain::Error>>),
}
impl<B: BlockT> std::convert::From<Error<B>> for String {
fn from(error: Error<B>) -> String {
error.to_string()
}
}
fn babe_err<B: BlockT>(error: Error<B>) -> Error<B> {
debug!(target: "babe", "{}", error);
error
}
/// Intermediate value passed to block importer.
pub struct BabeIntermediate<B: BlockT> {
/// The epoch descriptor.
pub epoch_descriptor: ViableEpochDescriptor<B::Hash, NumberFor<B>, Epoch>,
}
/// Intermediate key for Babe engine.
pub static INTERMEDIATE_KEY: &[u8] = b"babe1";
/// A slot duration. Create with `get_or_compute`.
// FIXME: Once Rust has higher-kinded types, the duplication between this
// and `super::babe::Config` can be eliminated.
// https://github.com/paritytech/substrate/issues/2434
#[derive(Clone)]
pub struct Config(sc_consensus_slots::SlotDuration<BabeGenesisConfiguration>);
impl Config {
/// Either fetch the slot duration from disk or compute it from the genesis
/// state.
pub fn get_or_compute<B: BlockT, C>(client: &C) -> ClientResult<Self> where
C: AuxStore + ProvideRuntimeApi<B> + UsageProvider<B>, C::Api: BabeApi<B>,
{
trace!(target: "babe", "Getting slot duration");
match sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| {
let has_api_v1 = a.has_api_with::<dyn BabeApi<B>, _>(
&b, |v| v == 1,
)?;
let has_api_v2 = a.has_api_with::<dyn BabeApi<B>, _>(
&b, |v| v == 2,
)?;
if has_api_v1 {
#[allow(deprecated)] {
Ok(a.configuration_before_version_2(b)?.into())
}
} else if has_api_v2 {
a.configuration(b).map_err(Into::into)
} else {
Err(sp_blockchain::Error::VersionInvalid(
"Unsupported or invalid BabeApi version".to_string()
))
}
}).map(Self) {
Ok(s) => Ok(s),
Err(s) => {
warn!(target: "babe", "Failed to get slot duration");
Err(s)
}
}
}
/// Get the inner slot duration
pub fn slot_duration(&self) -> Duration {
self.0.slot_duration()
}
}
impl std::ops::Deref for Config {
type Target = BabeGenesisConfiguration;
fn deref(&self) -> &BabeGenesisConfiguration {
&*self.0
}
}
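
// Typical call-site sketch (the concrete client type is elided; any client
// satisfying the bounds on `get_or_compute` works):
//
//     let config = Config::get_or_compute(&*client)?;
//     let slot_duration: Duration = config.slot_duration();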
/// Parameters for BABE.
pub struct BabeParams<B: BlockT, C, SC, E, I, SO, L, CIDP, BS, CAW> {
/// The keystore that manages the keys of the node.
pub keystore: SyncCryptoStorePtr,
/// The client to use
pub client: Arc<C>,
/// The SelectChain Strategy
pub select_chain: SC,
/// The environment we are producing blocks for.
pub env: E,
/// The underlying block-import object to supply our produced blocks to.
/// This must be a `BabeBlockImport` or a wrapper of it, otherwise
/// critical consensus logic will be omitted.
pub block_import: I,
/// A sync oracle
pub sync_oracle: SO,
/// Hook into the sync module to control the justification sync process.
pub justification_sync_link: L,
/// Something that can create the inherent data providers.
pub create_inherent_data_providers: CIDP,
/// Force authoring of blocks even if we are offline
pub force_authoring: bool,
/// Strategy and parameters for backing off block production.
pub backoff_authoring_blocks: Option<BS>,
/// The source of timestamps for relative slots
pub babe_link: BabeLink<B>,
/// Checks if the current native implementation can author with a runtime at a given block.
pub can_author_with: CAW,
/// The proportion of the slot dedicated to proposing.
///
	/// Block proposing will be limited to this proportion of the slot, measured from the
	/// start of the slot. However, proposing can still take longer when a lenience factor is
	/// applied because no blocks were produced for some slots.
pub block_proposal_slot_portion: SlotProportion,
/// The maximum proportion of the slot dedicated to proposing with any lenience factor applied
/// due to no blocks being produced.
pub max_block_proposal_slot_portion: Option<SlotProportion>,
/// Handle use to report telemetries.
pub telemetry: Option<TelemetryHandle>,
}
/// Start the babe worker.
pub fn start_babe<B, C, SC, E, I, SO, CIDP, BS, CAW, L, Error>(
BabeParams {
keystore,
client,
select_chain,
env,
block_import,
sync_oracle,
justification_sync_link,
create_inherent_data_providers,
force_authoring,
backoff_authoring_blocks,
babe_link,
can_author_with,
block_proposal_slot_portion,
max_block_proposal_slot_portion,
telemetry,
}: BabeParams<B, C, SC, E, I, SO, L, CIDP, BS, CAW>,
) -> Result<BabeWorker<B>, sp_consensus::Error>
where
B: BlockT,
C: ProvideRuntimeApi<B>
+ ProvideCache<B>
+ ProvideUncles<B>
+ BlockchainEvents<B>
+ HeaderBackend<B>
+ HeaderMetadata<B, Error = ClientError>
+ Send
+ Sync
+ 'static,
C::Api: BabeApi<B>,
SC: SelectChain<B> + 'static,
E: Environment<B, Error = Error> + Send + Sync + 'static,
E::Proposer: Proposer<B, Error = Error, Transaction = sp_api::TransactionFor<C, B>>,
I: BlockImport<B, Error = ConsensusError, Transaction = sp_api::TransactionFor<C, B>>
+ Send
+ Sync
+ 'static,
SO: SyncOracle + Send + Sync + Clone + 'static,
L: sp_consensus::JustificationSyncLink<B> + 'static,
CIDP: CreateInherentDataProviders<B, ()> + Send + Sync + 'static,
CIDP::InherentDataProviders: InherentDataProviderExt + Send,
BS: BackoffAuthoringBlocksStrategy<NumberFor<B>> + Send + 'static,
CAW: CanAuthorWith<B> + Send + Sync + 'static,
Error: std::error::Error + Send + From<ConsensusError> + From<I::Error> + 'static,
{
const HANDLE_BUFFER_SIZE: usize = 1024;
let config = babe_link.config;
let slot_notification_sinks = Arc::new(Mutex::new(Vec::new()));
let worker = BabeSlotWorker {
client: client.clone(),
block_import,
env,
sync_oracle: sync_oracle.clone(),
justification_sync_link,
force_authoring,
backoff_authoring_blocks,
keystore,
epoch_changes: babe_link.epoch_changes.clone(),
slot_notification_sinks: slot_notification_sinks.clone(),
config: config.clone(),
block_proposal_slot_portion,
max_block_proposal_slot_portion,
telemetry,
};
info!(target: "babe", "👶 Starting BABE Authorship worker");
let inner = sc_consensus_slots::start_slot_worker(
config.0.clone(),
select_chain,
worker,
sync_oracle,
create_inherent_data_providers,
can_author_with,
);
let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE);
let answer_requests = answer_requests(worker_rx, config.0, client, babe_link.epoch_changes.clone());
Ok(BabeWorker {
inner: Box::pin(future::join(inner, answer_requests).map(|_| ())),
slot_notification_sinks,
handle: BabeWorkerHandle(worker_tx),
})
}
async fn answer_requests<B: BlockT, C>(
mut request_rx: Receiver<BabeRequest<B>>,
genesis_config: sc_consensus_slots::SlotDuration<BabeGenesisConfiguration>,
client: Arc<C>,
epoch_changes: SharedEpochChanges<B, Epoch>,
)
where C: ProvideRuntimeApi<B> + ProvideCache<B> + ProvideUncles<B> + BlockchainEvents<B>
+ HeaderBackend<B> + HeaderMetadata<B, Error = ClientError> + Send + Sync + 'static,
{
while let Some(request) = request_rx.next().await {
match request {
BabeRequest::EpochForChild(parent_hash, parent_number, slot_number, response) => {
let lookup = || {
let epoch_changes = epoch_changes.shared_data();
let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of(
descendent_query(&*client),
&parent_hash,
parent_number,
slot_number,
)
.map_err(|e| Error::<B>::ForkTree(Box::new(e)))?
.ok_or_else(|| Error::<B>::FetchEpoch(parent_hash))?;
let viable_epoch = epoch_changes.viable_epoch(
&epoch_descriptor,
|slot| Epoch::genesis(&genesis_config, slot)
).ok_or_else(|| Error::<B>::FetchEpoch(parent_hash))?;
Ok(sp_consensus_babe::Epoch {
epoch_index: viable_epoch.as_ref().epoch_index,
start_slot: viable_epoch.as_ref().start_slot,
duration: viable_epoch.as_ref().duration,
authorities: viable_epoch.as_ref().authorities.clone(),
randomness: viable_epoch.as_ref().randomness,
config: viable_epoch.as_ref().config.clone(),
})
};
let _ = response.send(lookup());
}
}
}
}
/// Requests to the BABE service.
#[non_exhaustive]
pub enum BabeRequest<B: BlockT> {
/// Request the epoch that a child of the given block, with the given slot number would have.
///
/// The parent block is identified by its hash and number.
EpochForChild(
B::Hash,
NumberFor<B>,
Slot,
oneshot::Sender<Result<sp_consensus_babe::Epoch, Error<B>>>,
),
}
/// A handle to the BABE worker for issuing requests.
#[derive(Clone)]
pub struct BabeWorkerHandle<B: BlockT>(Sender<BabeRequest<B>>);
impl<B: BlockT> BabeWorkerHandle<B> {
/// Send a request to the BABE service.
pub async fn send(&mut self, request: BabeRequest<B>) {
// Failure to send means that the service is down.
// This will manifest as the receiver of the request being dropped.
let _ = self.0.send(request).await;
}
}
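
// Request/response sketch for the handle (illustrative; `parent_hash`,
// `parent_number` and `slot` are assumed to be in scope):
//
//     let (tx, rx) = oneshot::channel();
//     handle.send(BabeRequest::EpochForChild(parent_hash, parent_number, slot, tx)).await;
//     let epoch = rx.await; // Err(Canceled) if the service is down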
/// Worker for Babe which implements `Future<Output=()>`. This must be polled.
#[must_use]
pub struct BabeWorker<B: BlockT> {
inner: Pin<Box<dyn futures::Future<Output=()> + Send + 'static>>,
slot_notification_sinks: SlotNotificationSinks<B>,
handle: BabeWorkerHandle<B>,
}
impl<B: BlockT> BabeWorker<B> {
	/// Return an event stream of notifications for when a new slot happens, and the
	/// corresponding epoch descriptor.
pub fn slot_notification_stream(
&self
) -> Receiver<(Slot, ViableEpochDescriptor<B::Hash, NumberFor<B>, Epoch>)> {
const CHANNEL_BUFFER_SIZE: usize = 1024;
let (sink, stream) = channel(CHANNEL_BUFFER_SIZE);
self.slot_notification_sinks.lock().push(sink);
stream
}
/// Get a handle to the worker.
pub fn handle(&self) -> BabeWorkerHandle<B> {
self.handle.clone()
}
}
impl<B: BlockT> futures::Future for BabeWorker<B> {
type Output = ();
fn poll(
mut self: Pin<&mut Self>,
cx: &mut futures::task::Context
) -> futures::task::Poll<Self::Output> {
self.inner.as_mut().poll(cx)
}
}
/// Slot notification sinks.
type SlotNotificationSinks<B> = Arc<
Mutex<Vec<Sender<(Slot, ViableEpochDescriptor<<B as BlockT>::Hash, NumberFor<B>, Epoch>)>>>
>;
struct BabeSlotWorker<B: BlockT, C, E, I, SO, L, BS> {
client: Arc<C>,
block_import: I,
env: E,
sync_oracle: SO,
justification_sync_link: L,
force_authoring: bool,
backoff_authoring_blocks: Option<BS>,
keystore: SyncCryptoStorePtr,
epoch_changes: SharedEpochChanges<B, Epoch>,
slot_notification_sinks: SlotNotificationSinks<B>,
config: Config,
block_proposal_slot_portion: SlotProportion,
max_block_proposal_slot_portion: Option<SlotProportion>,
telemetry: Option<TelemetryHandle>,
}
impl<B, C, E, I, Error, SO, L, BS> sc_consensus_slots::SimpleSlotWorker<B>
for BabeSlotWorker<B, C, E, I, SO, L, BS>
where
B: BlockT,
C: ProvideRuntimeApi<B>
+ ProvideCache<B>
+ HeaderBackend<B>
+ HeaderMetadata<B, Error = ClientError>,
C::Api: BabeApi<B>,
E: Environment<B, Error = Error>,
E::Proposer: Proposer<B, Error = Error, Transaction = sp_api::TransactionFor<C, B>>,
I: BlockImport<B, Transaction = sp_api::TransactionFor<C, B>> + Send + Sync + 'static,
SO: SyncOracle + Send + Clone,
L: sp_consensus::JustificationSyncLink<B>,
BS: BackoffAuthoringBlocksStrategy<NumberFor<B>>,
Error: std::error::Error + Send + From<ConsensusError> + From<I::Error> + 'static,
{
type EpochData = ViableEpochDescriptor<B::Hash, NumberFor<B>, Epoch>;
type Claim = (PreDigest, AuthorityId);
type SyncOracle = SO;
type JustificationSyncLink = L;
type CreateProposer = Pin<Box<
dyn Future<Output = Result<E::Proposer, sp_consensus::Error>> + Send + 'static
>>;
type Proposer = E::Proposer;
type BlockImport = I;
fn logging_target(&self) -> &'static str {
"babe"
}
fn block_import(&mut self) -> &mut Self::BlockImport {
&mut self.block_import
}
fn epoch_data(
&self,
parent: &B::Header,
slot: Slot,
) -> Result<Self::EpochData, ConsensusError> {
self.epoch_changes.shared_data().epoch_descriptor_for_child_of(
descendent_query(&*self.client),
&parent.hash(),
parent.number().clone(),
slot,
)
.map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))?
.ok_or(sp_consensus::Error::InvalidAuthoritiesSet)
}
fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option<usize> {
self.epoch_changes
.shared_data()
.viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot))
.map(|epoch| epoch.as_ref().authorities.len())
}
fn claim_slot(
&self,
_parent_header: &B::Header,
slot: Slot,
epoch_descriptor: &ViableEpochDescriptor<B::Hash, NumberFor<B>, Epoch>,
) -> Option<Self::Claim> {
debug!(target: "babe", "Attempting to claim slot {}", slot);
let s = authorship::claim_slot(
slot,
self.epoch_changes.shared_data().viable_epoch(
&epoch_descriptor,
|slot| Epoch::genesis(&self.config, slot)
)?.as_ref(),
&self.keystore,
);
if s.is_some() {
debug!(target: "babe", "Claimed slot {}", slot);
}
s
}
fn notify_slot(
&self,
_parent_header: &B::Header,
slot: Slot,
epoch_descriptor: &ViableEpochDescriptor<B::Hash, NumberFor<B>, Epoch>,
) {
self.slot_notification_sinks.lock()
.retain_mut(|sink| {
match sink.try_send((slot, epoch_descriptor.clone())) {
Ok(()) => true,
Err(e) => {
if e.is_full() {
warn!(target: "babe", "Trying to notify a slot but the channel is full");
true
} else {
false
}
},
}
});
}
fn pre_digest_data(
&self,
_slot: Slot,
claim: &Self::Claim,
) -> Vec<sp_runtime::DigestItem<B::Hash>> {
vec![
<DigestItemFor<B> as CompatibleDigestItem>::babe_pre_digest(claim.0.clone()),
]
}
fn block_import_params(&self) -> Box<dyn Fn(
B::Header,
&B::Hash,
Vec<B::Extrinsic>,
StorageChanges<I::Transaction, B>,
Self::Claim,
Self::EpochData,
) -> Result<
sp_consensus::BlockImportParams<B, I::Transaction>,
sp_consensus::Error> + Send + 'static>
{
let keystore = self.keystore.clone();
Box::new(move |header, header_hash, body, storage_changes, (_, public), epoch_descriptor| {
// sign the pre-sealed hash of the block and then
// add it to a digest item.
let public_type_pair = public.clone().into();
let public = public.to_raw_vec();
let signature = SyncCryptoStore::sign_with(
&*keystore,
<AuthorityId as AppKey>::ID,
&public_type_pair,
header_hash.as_ref()
)
.map_err(|e| sp_consensus::Error::CannotSign(
public.clone(), e.to_string(),
))?
.ok_or_else(|| sp_consensus::Error::CannotSign(
public.clone(), "Could not find key in keystore.".into(),
))?;
let signature: AuthoritySignature = signature.clone().try_into()
.map_err(|_| sp_consensus::Error::InvalidSignature(
signature, public
))?;
let digest_item = <DigestItemFor<B> as CompatibleDigestItem>::babe_seal(signature.into());
let mut import_block = BlockImportParams::new(BlockOrigin::Own, header);
import_block.post_digests.push(digest_item);
import_block.body = Some(body);
import_block.state_action = StateAction::ApplyChanges(
sp_consensus::StorageChanges::Changes(storage_changes)
);
import_block.intermediates.insert(
Cow::from(INTERMEDIATE_KEY),
Box::new(BabeIntermediate::<B> { epoch_descriptor }) as Box<_>,
);
Ok(import_block)
})
}
fn force_authoring(&self) -> bool {
self.force_authoring
}
fn should_backoff(&self, slot: Slot, chain_head: &B::Header) -> bool {
if let Some(ref strategy) = self.backoff_authoring_blocks {
if let Ok(chain_head_slot) = find_pre_digest::<B>(chain_head)
.map(|digest| digest.slot())
{
return strategy.should_backoff(
*chain_head.number(),
chain_head_slot,
self.client.info().finalized_number,
slot,
self.logging_target(),
);
}
}
false
}
fn sync_oracle(&mut self) -> &mut Self::SyncOracle {
&mut self.sync_oracle
}
fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink {
&mut self.justification_sync_link
}
fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer {
Box::pin(self.env.init(block).map_err(|e| {
sp_consensus::Error::ClientImport(format!("{:?}", e))
}))
}
fn telemetry(&self) -> Option<TelemetryHandle> {
self.telemetry.clone()
}
fn proposing_remaining_duration(&self, slot_info: &SlotInfo<B>) -> std::time::Duration {
let parent_slot = find_pre_digest::<B>(&slot_info.chain_head).ok().map(|d| d.slot());
sc_consensus_slots::proposing_remaining_duration(
parent_slot,
slot_info,
&self.block_proposal_slot_portion,
self.max_block_proposal_slot_portion.as_ref(),
sc_consensus_slots::SlotLenienceType::Exponential,
self.logging_target(),
)
}
}
/// Extract the BABE pre digest from the given header. Pre-runtime digests are
/// mandatory, the function will return `Err` if none is found.
pub fn find_pre_digest<B: BlockT>(header: &B::Header) -> Result<PreDigest, Error<B>> {
// genesis block doesn't contain a pre digest so let's generate a
// dummy one to not break any invariants in the rest of the code
if header.number().is_zero() {
return Ok(PreDigest::SecondaryPlain(SecondaryPlainPreDigest {
slot: 0.into(),
authority_index: 0,
}));
}
let mut pre_digest: Option<_> = None;
for log in header.digest().logs() {
trace!(target: "babe", "Checking log {:?}, looking for pre runtime digest", log);
match (log.as_babe_pre_digest(), pre_digest.is_some()) {
(Some(_), true) => return Err(babe_err(Error::MultiplePreRuntimeDigests)),
(None, _) => trace!(target: "babe", "Ignoring digest not meant for us"),
(s, false) => pre_digest = s,
}
}
pre_digest.ok_or_else(|| babe_err(Error::NoPreRuntimeDigest))
}
/// Extract the BABE epoch change digest from the given header, if it exists.
fn find_next_epoch_digest<B: BlockT>(header: &B::Header)
-> Result<Option<NextEpochDescriptor>, Error<B>>
where DigestItemFor<B>: CompatibleDigestItem,
{
let mut epoch_digest: Option<_> = None;
for log in header.digest().logs() {
trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log);
let log = log.try_to::<ConsensusLog>(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID));
match (log, epoch_digest.is_some()) {
(Some(ConsensusLog::NextEpochData(_)), true) => return Err(babe_err(Error::MultipleEpochChangeDigests)),
(Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch),
_ => trace!(target: "babe", "Ignoring digest not meant for us"),
}
}
Ok(epoch_digest)
}
/// Extract the BABE config change digest from the given header, if it exists.
fn find_next_config_digest<B: BlockT>(header: &B::Header)
-> Result<Option<NextConfigDescriptor>, Error<B>>
where DigestItemFor<B>: CompatibleDigestItem,
{
let mut config_digest: Option<_> = None;
for log in header.digest().logs() {
trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log);
let log = log.try_to::<ConsensusLog>(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID));
match (log, config_digest.is_some()) {
(Some(ConsensusLog::NextConfigData(_)), true) => return Err(babe_err(Error::MultipleConfigChangeDigests)),
(Some(ConsensusLog::NextConfigData(config)), false) => config_digest = Some(config),
_ => trace!(target: "babe", "Ignoring digest not meant for us"),
}
}
Ok(config_digest)
}
/// State that must be shared between the import queue and the authoring logic.
#[derive(Clone)]
pub struct BabeLink<Block: BlockT> {
epoch_changes: SharedEpochChanges<Block, Epoch>,
config: Config,
}
impl<Block: BlockT> BabeLink<Block> {
/// Get the epoch changes of this link.
pub fn epoch_changes(&self) -> &SharedEpochChanges<Block, Epoch> {
&self.epoch_changes
}
/// Get the config of this link.
pub fn config(&self) -> &Config {
&self.config
}
}
/// A verifier for Babe blocks.
pub struct BabeVerifier<Block: BlockT, Client, SelectChain, CAW, CIDP> {
client: Arc<Client>,
select_chain: SelectChain,
create_inherent_data_providers: CIDP,
config: Config,
epoch_changes: SharedEpochChanges<Block, Epoch>,
can_author_with: CAW,
telemetry: Option<TelemetryHandle>,
}
impl<Block, Client, SelectChain, CAW, CIDP> BabeVerifier<Block, Client, SelectChain, CAW, CIDP>
where
Block: BlockT,
Client: AuxStore + HeaderBackend<Block> + HeaderMetadata<Block> + ProvideRuntimeApi<Block>,
Client::Api: BlockBuilderApi<Block> + BabeApi<Block>,
SelectChain: sp_consensus::SelectChain<Block>,
CAW: CanAuthorWith<Block>,
CIDP: CreateInherentDataProviders<Block, ()>,
{
async fn check_inherents(
&self,
block: Block,
block_id: BlockId<Block>,
inherent_data: InherentData,
create_inherent_data_providers: CIDP::InherentDataProviders,
) -> Result<(), Error<Block>> {
if let Err(e) = self.can_author_with.can_author_with(&block_id) {
debug!(
target: "babe",
"Skipping `check_inherents` as authoring version is not compatible: {}",
e,
);
return Ok(())
}
let inherent_res = self.client.runtime_api().check_inherents(
&block_id,
block,
inherent_data,
).map_err(Error::RuntimeApi)?;
if !inherent_res.ok() {
for (i, e) in inherent_res.into_errors() {
match create_inherent_data_providers.try_handle_error(&i, &e).await {
Some(res) => res.map_err(|e| Error::CheckInherents(e))?,
None => return Err(Error::CheckInherentsUnhandled(i)),
}
}
}
Ok(())
}
async fn check_and_report_equivocation(
&self,
slot_now: Slot,
slot: Slot,
header: &Block::Header,
author: &AuthorityId,
origin: &BlockOrigin,
) -> Result<(), Error<Block>> {
// don't report any equivocations during initial sync
// as they are most likely stale.
if *origin == BlockOrigin::NetworkInitialSync {
return Ok(());
}
// check if authorship of this header is an equivocation and return a proof if so.
let equivocation_proof =
match check_equivocation(&*self.client, slot_now, slot, header, author)
.map_err(Error::Client)?
{
Some(proof) => proof,
None => return Ok(()),
};
info!(
"Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}",
author,
slot,
equivocation_proof.first_header.hash(),
equivocation_proof.second_header.hash(),
);
// get the best block on which we will build and send the equivocation report.
let best_id = self
.select_chain
.best_chain()
.await
.map(|h| BlockId::Hash(h.hash()))
.map_err(|e| Error::Client(e.into()))?;
		// generate a key ownership proof. we start by trying to generate the
		// key ownership proof at the parent of the equivocating header, this
		// will make sure that proof generation is successful since it happens
		// during the ongoing session (i.e. session keys are available in the
		// state to be able to generate the proof). this might fail if the
		// equivocation happens on the first block of the session, in which case
		// its parent would be in the previous session. if generation on the
		// parent header fails we try with the best block as well.
let generate_key_owner_proof = |block_id: &BlockId<Block>| {
self.client
.runtime_api()
.generate_key_ownership_proof(block_id, slot, equivocation_proof.offender.clone())
.map_err(Error::RuntimeApi)
};
let parent_id = BlockId::Hash(*header.parent_hash());
let key_owner_proof = match generate_key_owner_proof(&parent_id)? {
Some(proof) => proof,
None => match generate_key_owner_proof(&best_id)? {
Some(proof) => proof,
None => {
debug!(target: "babe", "Equivocation offender is not part of the authority set.");
return Ok(());
}
},
};
// submit equivocation report at best block.
self.client
.runtime_api()
.submit_report_equivocation_unsigned_extrinsic(
&best_id,
equivocation_proof,
key_owner_proof,
)
.map_err(Error::RuntimeApi)?;
info!(target: "babe", "Submitted equivocation report for author {:?}", author);
Ok(())
}
}
type BlockVerificationResult<Block> = Result<
(
BlockImportParams<Block, ()>,
Option<Vec<(CacheKeyId, Vec<u8>)>>,
),
String,
>;
#[async_trait::async_trait]
impl<Block, Client, SelectChain, CAW, CIDP> Verifier<Block>
for BabeVerifier<Block, Client, SelectChain, CAW, CIDP>
where
Block: BlockT,
Client: HeaderMetadata<Block, Error = sp_blockchain::Error>
+ HeaderBackend<Block>
+ ProvideRuntimeApi<Block>
+ Send
+ Sync
+ AuxStore
+ ProvideCache<Block>,
Client::Api: BlockBuilderApi<Block> + BabeApi<Block>,
SelectChain: sp_consensus::SelectChain<Block>,
CAW: CanAuthorWith<Block> + Send + Sync,
CIDP: CreateInherentDataProviders<Block, ()> + Send + Sync,
CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync,
{
async fn verify(
&mut self,
origin: BlockOrigin,
header: Block::Header,
justifications: Option<Justifications>,
mut body: Option<Vec<Block::Extrinsic>>,
) -> BlockVerificationResult<Block> {
trace!(
target: "babe",
"Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}",
origin,
header,
justifications,
body,
);
let hash = header.hash();
let parent_hash = *header.parent_hash();
debug!(target: "babe", "We have {:?} logs in this header", header.digest().logs().len());
let create_inherent_data_providers = self
.create_inherent_data_providers
.create_inherent_data_providers(parent_hash, ())
.await
.map_err(|e| Error::<Block>::Client(sp_consensus::Error::from(e).into()))?;
let slot_now = create_inherent_data_providers.slot();
let parent_header_metadata = self.client.header_metadata(parent_hash)
.map_err(Error::<Block>::FetchParentHeader)?;
let pre_digest = find_pre_digest::<Block>(&header)?;
let (check_header, epoch_descriptor) = {
let epoch_changes = self.epoch_changes.shared_data();
let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of(
descendent_query(&*self.client),
&parent_hash,
parent_header_metadata.number,
pre_digest.slot(),
)
.map_err(|e| Error::<Block>::ForkTree(Box::new(e)))?
.ok_or_else(|| Error::<Block>::FetchEpoch(parent_hash))?;
let viable_epoch = epoch_changes.viable_epoch(
&epoch_descriptor,
|slot| Epoch::genesis(&self.config, slot)
).ok_or_else(|| Error::<Block>::FetchEpoch(parent_hash))?;
// We add one to the current slot to allow for some small drift.
// FIXME #1019 in the future, alter this queue to allow deferring of headers
let v_params = verification::VerificationParams {
header: header.clone(),
pre_digest: Some(pre_digest),
slot_now: slot_now + 1,
epoch: viable_epoch.as_ref(),
};
(verification::check_header::<Block>(v_params)?, epoch_descriptor)
};
match check_header {
CheckedHeader::Checked(pre_header, verified_info) => {
let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest()
.expect("check_header always returns a pre-digest digest item; qed");
let slot = babe_pre_digest.slot();
// the header is valid but let's check if there was something else already
// proposed at the same slot by the given author. if there was, we will
// report the equivocation to the runtime.
if let Err(err) = self.check_and_report_equivocation(
slot_now,
slot,
&header,
&verified_info.author,
&origin,
).await {
warn!(target: "babe", "Error checking/reporting BABE equivocation: {:?}", err);
}
// if the body is passed through, we need to use the runtime
// to check that the internally-set timestamp in the inherents
// actually matches the slot set in the seal.
if let Some(inner_body) = body.take() {
let mut inherent_data = create_inherent_data_providers.create_inherent_data()
.map_err(Error::<Block>::CreateInherents)?;
inherent_data.babe_replace_inherent_data(slot);
let block = Block::new(pre_header.clone(), inner_body);
self.check_inherents(
block.clone(),
BlockId::Hash(parent_hash),
inherent_data,
create_inherent_data_providers,
).await?;
let (_, inner_body) = block.deconstruct();
body = Some(inner_body);
}
trace!(target: "babe", "Checked {:?}; importing.", pre_header);
telemetry!(
self.telemetry;
CONSENSUS_TRACE;
"babe.checked_and_importing";
"pre_header" => ?pre_header,
);
let mut import_block = BlockImportParams::new(origin, pre_header);
import_block.post_digests.push(verified_info.seal);
import_block.body = body;
import_block.justifications = justifications;
import_block.intermediates.insert(
Cow::from(INTERMEDIATE_KEY),
Box::new(BabeIntermediate::<Block> { epoch_descriptor }) as Box<_>,
);
import_block.post_hash = Some(hash);
Ok((import_block, Default::default()))
}
CheckedHeader::Deferred(a, b) => {
debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b);
telemetry!(
self.telemetry;
CONSENSUS_DEBUG;
"babe.header_too_far_in_future";
"hash" => ?hash, "a" => ?a, "b" => ?b
);
Err(Error::<Block>::TooFarInFuture(hash).into())
}
}
}
}
/// A block-import handler for BABE.
///
/// This scans each imported block for epoch change signals. The signals are
/// tracked in a tree (of all forks), and the import logic validates all epoch
/// change transitions, i.e. whether a given epoch change is expected or whether
/// it is missing.
///
/// The epoch change tree should be pruned as blocks are finalized.
pub struct BabeBlockImport<Block: BlockT, Client, I> {
inner: I,
client: Arc<Client>,
epoch_changes: SharedEpochChanges<Block, Epoch>,
config: Config,
}
impl<Block: BlockT, I: Clone, Client> Clone for BabeBlockImport<Block, Client, I> {
fn clone(&self) -> Self {
BabeBlockImport {
inner: self.inner.clone(),
client: self.client.clone(),
epoch_changes: self.epoch_changes.clone(),
config: self.config.clone(),
}
}
}
impl<Block: BlockT, Client, I> BabeBlockImport<Block, Client, I> {
fn new(
client: Arc<Client>,
epoch_changes: SharedEpochChanges<Block, Epoch>,
block_import: I,
config: Config,
) -> Self {
BabeBlockImport {
client,
inner: block_import,
epoch_changes,
config,
}
}
}
#[async_trait::async_trait]
impl<Block, Client, Inner> BlockImport<Block> for BabeBlockImport<Block, Client, Inner> where
Block: BlockT,
Inner: BlockImport<Block, Transaction = sp_api::TransactionFor<Client, Block>> + Send + Sync,
Inner::Error: Into<ConsensusError>,
Client: HeaderBackend<Block> + HeaderMetadata<Block, Error = sp_blockchain::Error>
+ AuxStore + ProvideRuntimeApi<Block> + ProvideCache<Block> + Send + Sync,
Client::Api: BabeApi<Block> + ApiExt<Block>,
{
type Error = ConsensusError;
type Transaction = sp_api::TransactionFor<Client, Block>;
async fn import_block(
&mut self,
mut block: BlockImportParams<Block, Self::Transaction>,
new_cache: HashMap<CacheKeyId, Vec<u8>>,
) -> Result<ImportResult, Self::Error> {
let hash = block.post_hash();
let number = *block.header.number();
// early exit if block already in chain, otherwise the check for
// epoch changes will error when trying to re-import an epoch change
match self.client.status(BlockId::Hash(hash)) {
Ok(sp_blockchain::BlockStatus::InChain) => {
// When re-importing existing block strip away intermediates.
let _ = block.take_intermediate::<BabeIntermediate<Block>>(INTERMEDIATE_KEY)?;
block.fork_choice = Some(ForkChoiceStrategy::Custom(false));
return self.inner.import_block(block, new_cache).await.map_err(Into::into)
},
Ok(sp_blockchain::BlockStatus::Unknown) => {},
Err(e) => return Err(ConsensusError::ClientImport(e.to_string())),
}
let pre_digest = find_pre_digest::<Block>(&block.header)
.expect("valid babe headers must contain a predigest; \
header has been already verified; qed");
let slot = pre_digest.slot();
let parent_hash = *block.header.parent_hash();
let parent_header = self.client.header(BlockId::Hash(parent_hash))
.map_err(|e| ConsensusError::ChainLookup(e.to_string()))?
.ok_or_else(|| ConsensusError::ChainLookup(babe_err(
Error::<Block>::ParentUnavailable(parent_hash, hash)
).into()))?;
let parent_slot = find_pre_digest::<Block>(&parent_header)
.map(|d| d.slot())
.expect("parent is non-genesis; valid BABE headers contain a pre-digest; \
header has already been verified; qed");
// make sure that slot number is strictly increasing
if slot <= parent_slot {
return Err(
ConsensusError::ClientImport(babe_err(
Error::<Block>::SlotMustIncrease(parent_slot, slot)
).into())
);
}
// if there's a pending epoch we'll save the previous epoch changes here
// this way we can revert it if there's any error
let mut old_epoch_changes = None;
		// Use an extra scope to make the compiler happy, because otherwise it complains about the
		// mutex, even if we dropped it...
let mut epoch_changes = {
let mut epoch_changes = self.epoch_changes.shared_data_locked();
// check if there's any epoch change expected to happen at this slot.
// `epoch` is the epoch to verify the block under, and `first_in_epoch` is true
// if this is the first block in its chain for that epoch.
//
// also provides the total weight of the chain, including the imported block.
let (epoch_descriptor, first_in_epoch, parent_weight) = {
let parent_weight = if *parent_header.number() == Zero::zero() {
0
} else {
aux_schema::load_block_weight(&*self.client, parent_hash)
.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
.ok_or_else(|| ConsensusError::ClientImport(
babe_err(Error::<Block>::ParentBlockNoAssociatedWeight(hash)).into()
))?
};
let intermediate = block.take_intermediate::<BabeIntermediate<Block>>(
INTERMEDIATE_KEY
)?;
let epoch_descriptor = intermediate.epoch_descriptor;
let first_in_epoch = parent_slot < epoch_descriptor.start_slot();
(epoch_descriptor, first_in_epoch, parent_weight)
};
let total_weight = parent_weight + pre_digest.added_weight();
// search for this all the time so we can reject unexpected announcements.
let next_epoch_digest = find_next_epoch_digest::<Block>(&block.header)
.map_err(|e| ConsensusError::ClientImport(e.to_string()))?;
let next_config_digest = find_next_config_digest::<Block>(&block.header)
.map_err(|e| ConsensusError::ClientImport(e.to_string()))?;
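// Consistency check for the digests found in the header: the first block
// of an epoch must carry an epoch-change digest, no other block may, and
// a config-change digest is only valid alongside an epoch-change digest.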
match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) {
(true, true, _) => {},
(false, false, false) => {},
(false, false, true) => {
return Err(
ConsensusError::ClientImport(
babe_err(Error::<Block>::UnexpectedConfigChange).into(),
)
)
},
(true, false, _) => {
return Err(
ConsensusError::ClientImport(
babe_err(Error::<Block>::ExpectedEpochChange(hash, slot)).into(),
)
)
},
(false, true, _) => {
return Err(
ConsensusError::ClientImport(
babe_err(Error::<Block>::UnexpectedEpochChange).into(),
)
)
},
}
let info = self.client.info();
if let Some(next_epoch_descriptor) = next_epoch_digest {
old_epoch_changes = Some((*epoch_changes).clone());
let viable_epoch = epoch_changes.viable_epoch(
&epoch_descriptor,
|slot| Epoch::genesis(&self.config, slot)
).ok_or_else(|| {
ConsensusError::ClientImport(Error::<Block>::FetchEpoch(parent_hash).into())
})?;
let epoch_config = next_config_digest.map(Into::into).unwrap_or_else(
|| viable_epoch.as_ref().config.clone()
);
// restrict info logging during initial sync to avoid spam
let log_level = if block.origin == BlockOrigin::NetworkInitialSync {
log::Level::Debug
} else {
log::Level::Info
};
log!(target: "babe",
log_level,
"👶 New epoch {} launching at block {} (block slot {} >= start slot {}).",
viable_epoch.as_ref().epoch_index,
hash,
slot,
viable_epoch.as_ref().start_slot,
);
let next_epoch = viable_epoch.increment((next_epoch_descriptor, epoch_config));
log!(target: "babe",
log_level,
"👶 Next epoch starts at slot {}",
next_epoch.as_ref().start_slot,
);
// prune the tree of epochs not part of the finalized chain or
// that are not live anymore, and then track the given epoch change
// in the tree.
// NOTE: it is important that these operations are done in this
// order, otherwise if pruning after import the `is_descendent_of`
// used by pruning may not know about the block that is being
// imported.
let prune_and_import = || {
prune_finalized(
self.client.clone(),
&mut epoch_changes,
)?;
epoch_changes.import(
descendent_query(&*self.client),
hash,
number,
*block.header.parent_hash(),
next_epoch,
).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?;
Ok(())
};
if let Err(e) = prune_and_import() {
debug!(target: "babe", "Failed to launch next epoch: {:?}", e);
*epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed");
return Err(e);
}
crate::aux_schema::write_epoch_changes::<Block, _, _>(
&*epoch_changes,
|insert| block.auxiliary.extend(
insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))
)
);
}
aux_schema::write_block_weight(
hash,
total_weight,
|values| block.auxiliary.extend(
values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))
),
);
// The fork choice rule is that we pick the heaviest chain (i.e. the
// one with more primary blocks); if there's a tie we go with the
// longest chain.
block.fork_choice = {
let (last_best, last_best_number) = (info.best_hash, info.best_number);
let last_best_weight = if &last_best == block.header.parent_hash() {
// the parent=genesis case is already covered for loading parent weight,
// so we don't need to cover again here.
parent_weight
} else {
aux_schema::load_block_weight(&*self.client, last_best)
.map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))?
.ok_or_else(
|| ConsensusError::ChainLookup("No block weight for parent header.".to_string())
)?
};
Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight {
true
} else if total_weight == last_best_weight {
number > last_best_number
} else {
false
}))
};
// Release the inner mutex guard; the shared data stays locked and can be
// re-acquired via `upgrade()` on the error path below.
epoch_changes.release_mutex()
};
let import_result = self.inner.import_block(block, new_cache).await;
// revert to the original epoch changes in case there's an error
// importing the block
if import_result.is_err() {
if let Some(old_epoch_changes) = old_epoch_changes {
*epoch_changes.upgrade() = old_epoch_changes;
}
}
import_result.map_err(Into::into)
}
async fn check_block(
&mut self,
block: BlockCheckParams<Block>,
) -> Result<ImportResult, Self::Error> {
self.inner.check_block(block).await.map_err(Into::into)
}
}
/// Gets the best finalized block and its slot, and prunes the given epoch tree.
fn prune_finalized<Block, Client>(
client: Arc<Client>,
epoch_changes: &mut EpochChangesFor<Block, Epoch>,
) -> Result<(), ConsensusError> where
Block: BlockT,
Client: HeaderBackend<Block> + HeaderMetadata<Block, Error = sp_blockchain::Error>,
{
let info = client.info();
let finalized_slot = {
let finalized_header = client.header(BlockId::Hash(info.finalized_hash))
.map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?
.expect("best finalized hash was given by client; \
finalized headers must exist in db; qed");
find_pre_digest::<Block>(&finalized_header)
.expect("finalized header must be valid; \
valid blocks have a pre-digest; qed")
.slot()
};
epoch_changes.prune_finalized(
descendent_query(&*client),
&info.finalized_hash,
info.finalized_number,
finalized_slot,
).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?;
Ok(())
}
/// Produce a BABE block-import object to be used later on in the construction of
/// an import-queue.
///
/// Also returns a link object used to correctly instantiate the import queue
/// and background worker.
pub fn block_import<Client, Block: BlockT, I>(
config: Config,
wrapped_block_import: I,
client: Arc<Client>,
) -> ClientResult<(BabeBlockImport<Block, Client, I>, BabeLink<Block>)>
where
Client: AuxStore + HeaderBackend<Block> + HeaderMetadata<Block, Error = sp_blockchain::Error>,
{
let epoch_changes = aux_schema::load_epoch_changes::<Block, _>(&*client, &config)?;
let link = BabeLink {
epoch_changes: epoch_changes.clone(),
config: config.clone(),
};
// NOTE: this isn't entirely necessary, but since we didn't use to prune the
// epoch tree it is useful as a migration, so that nodes prune long trees on
// startup rather than waiting until importing the next epoch change block.
prune_finalized(
client.clone(),
&mut epoch_changes.shared_data(),
)?;
let import = BabeBlockImport::new(
client,
epoch_changes,
wrapped_block_import,
config,
);
Ok((import, link))
}
/// Start an import queue for the BABE consensus algorithm.
///
/// This method returns the import queue. The `BabeLink` passed in (as
/// produced by `block_import`) provides the epoch-changes tree and the
/// configuration that the verifier needs.
///
/// The block import object provided must be the `BabeBlockImport` or a wrapper
/// of it, otherwise crucial import logic will be omitted.
pub fn import_queue<Block: BlockT, Client, SelectChain, Inner, CAW, CIDP>(
babe_link: BabeLink<Block>,
block_import: Inner,
justification_import: Option<BoxJustificationImport<Block>>,
client: Arc<Client>,
select_chain: SelectChain,
create_inherent_data_providers: CIDP,
spawner: &impl sp_core::traits::SpawnEssentialNamed,
registry: Option<&Registry>,
can_author_with: CAW,
telemetry: Option<TelemetryHandle>,
) -> ClientResult<DefaultImportQueue<Block, Client>> where
Inner: BlockImport<Block, Error = ConsensusError, Transaction = sp_api::TransactionFor<Client, Block>>
+ Send + Sync + 'static,
Client: ProvideRuntimeApi<Block> + ProvideCache<Block> + HeaderBackend<Block>
+ HeaderMetadata<Block, Error = sp_blockchain::Error> + AuxStore
+ Send + Sync + 'static,
Client::Api: BlockBuilderApi<Block> + BabeApi<Block> + ApiExt<Block>,
SelectChain: sp_consensus::SelectChain<Block> + 'static,
CAW: CanAuthorWith<Block> + Send + Sync + 'static,
CIDP: CreateInherentDataProviders<Block, ()> + Send + Sync + 'static,
CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync,
{
let verifier = BabeVerifier {
select_chain,
create_inherent_data_providers,
config: babe_link.config,
epoch_changes: babe_link.epoch_changes,
can_author_with,
telemetry,
client,
};
Ok(BasicQueue::new(
verifier,
Box::new(block_import),
justification_import,
spawner,
registry,
))
}
| 32.78219 | 109 | 0.704445 |
ebeeeb2c52cf2b0089c60cbfcaf682df813bec7d | 5,629 | use std::f64::consts::PI;
use clap::Clap;
use geo::*;
use geo::prelude::*;
use gre::*;
use rand::prelude::*;
use rayon::prelude::*;
use svg::node::element::{Group, path::Data};
#[derive(Clap)]
#[clap()]
struct Opts {
#[clap(short, long, default_value = "1.0")]
seed: f64,
}
fn make_polygon(x: f64, y: f64, size: f64, angle: f64) -> Polygon<f64> {
let count = 4;
Polygon::new(
LineString::from(
(0..count)
.map(|i| {
let a = angle + 2. * PI * i as f64 / (count as f64);
(x + size * a.cos(), y + size * a.sin())
})
.collect::<Vec<(f64, f64)>>()
),
vec![]
)
}
fn poly_collides_in_polys(polys: &Vec<Polygon<f64>>, poly: &Polygon<f64>) -> bool {
polys.iter().any(|p| {
poly.intersects(p)
})
}
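/// Dichotomic search for the largest value in `[min_scale, max_scale]` that
/// still satisfies `f`, to a precision of 0.1. Returns `None` if even
/// `min_scale` fails.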
fn scaling_search<F: FnMut(f64) -> bool>(
mut f: F,
min_scale: f64,
max_scale: f64,
) -> Option<f64> {
let mut from = min_scale;
let mut to = max_scale;
loop {
if !f(from) {
return None;
}
if to - from < 0.1 {
return Some(from);
}
let middle = (to + from) / 2.0;
if !f(middle) {
to = middle;
}
else {
from = middle;
}
}
}
fn search(
container: &Polygon<f64>,
polys: &Vec<Polygon<f64>>,
x: f64,
y: f64,
angle: f64,
min_scale: f64,
max_scale: f64,
) -> Option<f64> {
let overlaps = |size| {
let p = &make_polygon(x, y, size, angle);
container.contains(p) &&
!poly_collides_in_polys(polys, p)
};
scaling_search(overlaps, min_scale, max_scale)
}
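/// Greedily pack non-overlapping squares (see `make_polygon`) into
/// `container`: sample random positions and angles, grow each candidate with
/// `scaling_search`, and once more than `optimize_size` candidates have
/// accumulated keep only the largest, until `desired_count` polygons are
/// placed or `iterations` samples are exhausted.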
fn packing<F: FnMut(usize) -> f64>(
seed: f64,
iterations: usize,
desired_count: usize,
optimize_size: usize,
pad: f64,
container: &Polygon<f64>,
min_scale: f64,
mut max_scale: F,
) -> Vec<Polygon<f64>> {
let mut polys = Vec::new();
let mut tries = Vec::new();
let mut rng = rng_from_seed(seed);
let bounds = container.bounding_rect().unwrap();
let (x1, y1) = bounds.min().x_y();
let x2 = x1 + bounds.width();
let y2 = y1 + bounds.height();
for _i in 0..iterations {
let x: f64 = rng.gen_range(x1, x2);
let y: f64 = rng.gen_range(y1, y2);
let angle = rng.gen_range(0f64, 2. * PI);
if let Some(size) = search(&container, &polys, x, y, angle, min_scale, max_scale(polys.len())) {
tries.push((x, y, size - pad, angle));
if tries.len() > optimize_size {
tries.sort_by(|a, b| b.2.partial_cmp(&a.2).unwrap());
let (x, y, s, a) = tries[0];
let p = make_polygon(x, y, s, a);
polys.push(p);
tries = Vec::new();
}
}
if polys.len() > desired_count {
break;
}
}
polys
}
fn art(opts: Opts) -> Vec<Group> {
let width = 297.0;
let height = 210.0;
let pad = 20.0;
let stroke_width = 0.35;
let container = Polygon::new(
LineString::from(vec![
(pad, pad),
(width-pad, pad),
(width-pad, height-pad),
(pad, height-pad),
]),
vec![]
);
let routes =
packing(
opts.seed,
200000,
4000,
4,
0.45,
&container,
0.6,
|i| 60.0 / (1.0 + i as f64 * 0.5).min(4.0)
)
.par_iter()
.map(|poly| {
let bounds = poly.bounding_rect().unwrap();
let (x1, y1) = bounds.min().x_y();
let x2 = x1 + bounds.width();
let y2 = y1 + bounds.height();
let f = |p: (f64, f64)| {
(x1 + p.0 * (x2 - x1), y1 + p.1 * (y2 - y1))
};
let mut rng = rng_from_seed(opts.seed + 7.77 * x1 + y1 / 3.);
let mut candidates =
sample_2d_candidates(
&|p| {
let q = f(p);
poly.intersects(&Point::from(q))
},
400,
8 + (0.6 * bounds.width() * bounds.height()) as usize,
&mut rng,
);
candidates = candidates
.iter()
.map(|&p| f(p))
.collect();
let mut spiral = route_spiral(candidates);
if spiral.len() < 3 {
return vec![];
}
spiral[0] = (
(spiral[0].0 + spiral[1].0) / 2.,
(spiral[0].1 + spiral[1].1) / 2.,
);
spiral
})
.collect::<Vec<_>>();
let mut layers = Vec::new();
let colors = vec!["steelblue", "brown"];
for (ci, color) in colors.iter().enumerate() {
let mut l = layer(color);
if ci == 0 {
l = l.add(signature(
0.8,
(255.0, 190.0),
color,
));
}
let mut data = Data::new();
for (i, route) in routes.iter().enumerate() {
if i % colors.len() == ci {
data = render_route_curve(data, route.clone());
}
}
l = l.add(base_path(color, stroke_width, data));
layers.push(l);
}
layers
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(opts);
let mut document = base_a4_landscape("white");
for g in groups {
document = document.add(g);
}
svg::save("image.svg", &document).unwrap();
}
| 25.703196 | 104 | 0.448392 |
0aee94c1596d4156226817af2a83ae9d4951eff1 | 7,728 | use std::f64::consts::FRAC_PI_2;
use nalgebra::{Point, TAffine, Transform, Translation, Vector};
use parry3d_f64::query::{Ray, RayCast as _};
use winit::dpi::PhysicalPosition;
use crate::{
math::{Aabb, Scalar, Triangle},
window::Window,
};
/// The camera abstraction
///
/// Please note that the metaphor we're using (which influences how mouse input
/// is handled, for example) is not that of a camera freely flying through a
/// static scene. Instead, the camera is static, and the model is freely
/// translated and rotated.
#[derive(Debug)]
pub struct Camera {
/// The distance to the near plane
near_plane: f64,
/// The distance to the far plane
far_plane: f64,
/// The rotational part of the transform
///
/// This is not an `nalgebra::Rotation`, as rotations happen around a center
/// point, which means they must include a translational component.
pub rotation: Transform<f64, TAffine, 3>,
pub translation: Translation<f64, 3>,
}
impl Camera {
const DEFAULT_NEAR_PLANE: f64 = 0.0001;
const DEFAULT_FAR_PLANE: f64 = 1000.0;
const INITIAL_FIELD_OF_VIEW_IN_X: f64 = FRAC_PI_2; // 90 degrees
pub fn new(aabb: &Aabb<3>) -> Self {
let initial_distance = {
// Let's make sure we choose a distance, so that the model fills
// most of the screen.
//
// To do that, first compute the model's highest point, as well as
// the furthest point from the origin, in x and y.
let highest_point = aabb.max.z;
let furthest_point =
[aabb.min.x.abs(), aabb.max.x, aabb.min.y.abs(), aabb.max.y]
.into_iter()
.reduce(Scalar::max)
// `reduce` can only return `None` if there are no items in
// the iterator. And since we're creating an array full of
// items above, we know this can't panic.
.unwrap();
// The actual furthest point is not far enough, since we don't want
// the model to fill the whole screen.
let furthest_point = furthest_point * 2.;
// Having computed those points, figuring out how far the camera
// needs to be from the model is just a bit of trigonometry:
// distance = furthest_point / tan(fov_x / 2).
let distance_from_model =
furthest_point / (Self::INITIAL_FIELD_OF_VIEW_IN_X / 2.).tan();
// And finally, the distance from the origin is trivial now.
highest_point + distance_from_model
};
let initial_offset = {
let mut offset = aabb.center();
offset.z = Scalar::ZERO;
-offset
};
Self {
near_plane: Self::DEFAULT_NEAR_PLANE,
far_plane: Self::DEFAULT_FAR_PLANE,
rotation: Transform::identity(),
translation: Translation::from([
initial_offset.x.into_f64(),
initial_offset.y.into_f64(),
-initial_distance.into_f64(),
]),
}
}
pub fn near_plane(&self) -> f64 {
self.near_plane
}
pub fn far_plane(&self) -> f64 {
self.far_plane
}
pub fn field_of_view_in_x(&self) -> f64 {
Self::INITIAL_FIELD_OF_VIEW_IN_X
}
pub fn position(&self) -> Point<f64, 3> {
self.camera_to_model()
.inverse_transform_point(&Point::origin())
}
/// Transform the position of the cursor on the near plane to model space
pub fn cursor_to_model_space(
&self,
cursor: PhysicalPosition<f64>,
window: &Window,
) -> Point<f64, 3> {
let width = window.width() as f64;
let height = window.height() as f64;
let aspect_ratio = width / height;
// Cursor position in normalized coordinates (-1 to +1) with
// aspect ratio taken into account.
let x = cursor.x / width * 2. - 1.;
let y = -(cursor.y / height * 2. - 1.) / aspect_ratio;
// Cursor position in camera space.
let f = (self.field_of_view_in_x() / 2.).tan() * self.near_plane();
let cursor =
Point::origin() + Vector::from([x * f, y * f, -self.near_plane()]);
self.camera_to_model().inverse_transform_point(&cursor)
}
/// Compute the point on the model, that the cursor currently points to
pub fn focus_point(
&self,
window: &Window,
cursor: Option<PhysicalPosition<f64>>,
triangles: &[Triangle<3>],
) -> FocusPoint {
let cursor = match cursor {
Some(cursor) => cursor,
None => return FocusPoint::none(),
};
// Transform camera and cursor positions to model space.
let origin = self.position();
let cursor = self.cursor_to_model_space(cursor, window);
let dir = (cursor - origin).normalize();
let ray = Ray { origin, dir };
let mut min_t = None;
for triangle in triangles {
let t =
triangle
.to_parry()
.cast_local_ray(&ray, f64::INFINITY, true);
if let Some(t) = t {
if t <= min_t.unwrap_or(t) {
min_t = Some(t);
}
}
}
FocusPoint(min_t.map(|t| ray.point_at(t)))
}
/// Access the transform from camera to model space
pub fn camera_to_model(&self) -> Transform<f64, TAffine, 3> {
// Using a mutable variable cleanly takes care of any type inference
// problems that this operation would otherwise have.
let mut transform = Transform::identity();
transform *= self.translation;
transform *= self.rotation;
transform
}
pub fn update_planes(&mut self, aabb: &Aabb<3>) {
let view_transform = self.camera_to_model();
let view_direction = Vector::from([0., 0., -1.]);
let mut dist_min = f64::INFINITY;
let mut dist_max = f64::NEG_INFINITY;
for vertex in aabb.vertices() {
let point = view_transform.transform_point(&vertex.to_na());
// Project `point` onto `view_direction`. See this Wikipedia page:
// https://en.wikipedia.org/wiki/Vector_projection
//
// Let's rename the variables first, so they fit the names in that
// page.
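// The projection of `a` onto `b` is then a1 = (a · b / b · b) * b.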
let (a, b) = (point.coords, view_direction);
let a1 = a.dot(&b) / b.dot(&b) * b;
let dist = a1.magnitude();
if dist < dist_min {
dist_min = dist;
}
if dist > dist_max {
dist_max = dist;
}
}
self.near_plane = if dist_min > 0. {
// Setting `self.near_plane` to `dist_min` should theoretically
// work, but results in the front of the model being clipped. I
// wasn't able to figure out why, and for the time being, this
// factor seems to work well enough.
dist_min * 0.5
} else {
Self::DEFAULT_NEAR_PLANE
};
self.far_plane = if dist_max > 0. {
dist_max
} else {
Self::DEFAULT_FAR_PLANE
};
}
}
/// The point on the model that the cursor is currently pointing at
///
/// Such a point might or might not exist, depending on whether the cursor is
/// pointing at the model or not.
pub struct FocusPoint(pub Option<Point<f64, 3>>);
impl FocusPoint {
/// Construct the "none" instance of `FocusPoint`
///
/// This instance represents the case that no focus point exists.
pub fn none() -> Self {
Self(None)
}
}
| 32.470588 | 80 | 0.567158 |
e454411b1136c2623f2f7eeacf8bd65491dfcd49 | 743 | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
component_events::{events::*, matcher::*},
fuchsia_async as fasync,
};
#[fasync::run_singlethreaded]
async fn main() {
let event_source = EventSource::new().unwrap();
let mut event_stream =
event_source.take_static_event_stream("ScopedEventStream").await.unwrap();
EventMatcher::ok().moniker("./echo_server").wait::<Started>(&mut event_stream).await.unwrap();
EventMatcher::ok()
.stop(Some(ExitStatusMatcher::Clean))
.moniker("./echo_server")
.wait::<Stopped>(&mut event_stream)
.await
.unwrap();
}
| 32.304348 | 98 | 0.66891 |
1a6bca9eb72b9c327b78e070ac2693791a8efd55 | 1,193 | use crate::base::opcode::Operand;
#[derive(Debug, Clone, PartialEq)]
pub enum Literal {
Integer(String),
Float(String),
String(String),
True,
False,
Null,
Unit,
}
impl Literal {
pub fn to_operand(&self) -> Operand {
match &self {
Literal::Integer(v) => Operand::Integer(v.clone()),
Literal::Float(v) => Operand::Float(v.clone()),
Literal::String(v) => Operand::String(v.clone()),
Literal::True => Operand::True,
Literal::False => Operand::False,
Literal::Null => Operand::Null,
Literal::Unit => Operand::Unit,
}
}
// pub fn from_token(tok: LogosToken) -> Option<Literal> {
// match tok {
// LogosToken::True => Some(Literal::True),
// LogosToken::False => Some(Literal::False),
// LogosToken::Null => Some(Literal::Null),
// LogosToken::Integer(s) => Some(Literal::Integer(s.to_string())),
// LogosToken::Float(s) => Some(Literal::Float(s.to_string())),
// LogosToken::String(s) => Some(Literal::String(s.to_string())),
// _ => None,
// }
// }
}
| 30.589744 | 79 | 0.525566 |
71c1d63cf5af3d41c2e688c7d6049c61b9d955bd | 955 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// revisions: ast mir
//[mir]compile-flags: -Z borrowck=mir
#![feature(rustc_attrs)]
enum Sexpression {
Num(()),
Cons(&'static mut Sexpression)
}
fn causes_error_in_ast(mut l: &mut Sexpression) {
loop { match l {
&mut Sexpression::Num(ref mut n) => {},
&mut Sexpression::Cons(ref mut expr) => { //[ast]~ ERROR [E0499]
l = &mut **expr; //[ast]~ ERROR [E0506]
}
}}
}
#[rustc_error]
fn main() { //[mir]~ ERROR compilation successful
}
| 28.939394 | 72 | 0.660733 |
67490d5aa337f124b80f21239acb9ef6d51c8566 | 27,870 | /// body parsing functions
///
/// This module contains body parsing for the following mime types:
///
/// * json
/// * xml
/// * multipart/form-data
/// * urlencoded forms
///
/// The main function, parse_body, is the only exported function.
///
use multipart::server::Multipart;
use serde_json::{json, Value};
use std::io::Read;
use xmlparser::{ElementEnd, EntityDefinition, ExternalId, Token};
use crate::config::raw::ContentType;
use crate::config::utils::DataSource;
use crate::interface::{Action, ActionType};
use crate::logs::Logs;
use crate::requestfields::RequestField;
use crate::utils::decoders::parse_urlencoded_params_bytes;
mod graphql;
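/// Join the accumulated JSON key path with `_`; an empty path (a top-level
/// scalar) is reported as `JSON_ROOT`.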
fn json_path(prefix: &[String]) -> String {
if prefix.is_empty() {
"JSON_ROOT".to_string()
} else {
prefix.join("_")
}
}
/// flatten a JSON tree into the RequestField key/value store
/// key values are built by joining all path names with "_", where path names are:
/// * keys for objects;
/// * indices for lists.
///
/// Scalar values are converted to string, with lowercase booleans and null values.
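///
/// For example (mirroring the `json_nested_objects` test below), the body
/// `{"a": [true, null, {"z": 0.2}], "c": {"d": 12}}` flattens to
/// `a_0 = "true"`, `a_1 = "null"`, `a_2_z = "0.2"` and `c_d = "12"`.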
fn flatten_json(
depth_budget: usize,
args: &mut RequestField,
prefix: &mut Vec<String>,
value: Value,
) -> Result<(), ()> {
if depth_budget == 0 {
return Err(());
}
match value {
Value::Array(array) => {
prefix.push(String::new());
let idx = prefix.len() - 1;
for (i, v) in array.into_iter().enumerate() {
prefix[idx] = format!("{}", i);
flatten_json(depth_budget - 1, args, prefix, v)?;
}
prefix.pop();
}
Value::Object(mp) => {
prefix.push(String::new());
let idx = prefix.len() - 1;
for (k, v) in mp.into_iter() {
prefix[idx] = k;
flatten_json(depth_budget - 1, args, prefix, v)?;
}
prefix.pop();
}
Value::String(str) => {
args.add(json_path(prefix), DataSource::FromBody, str);
}
Value::Bool(b) => {
args.add(
json_path(prefix),
DataSource::FromBody,
(if b { "true" } else { "false" }).to_string(),
);
}
Value::Number(n) => {
args.add(json_path(prefix), DataSource::FromBody, format!("{}", n));
}
Value::Null => {
args.add(json_path(prefix), DataSource::FromBody, "null".to_string());
}
}
Ok(())
}
/// This should ideally work on a stream of JSON items instead of deserializing everything at once
///
/// I tried qjsonrs, but it was approximately 10x slower for small maps (though faster with larger maps)
/// qjsonrs -> serde_json benches:
/// * map/1 -> -98.83%
/// * map/100 -> -43.516%
/// * map/10000 -> +33.534%
///
/// next idea: adapting https://github.com/Geal/nom/blob/master/examples/json_iterator.rs
fn json_body(mxdepth: usize, args: &mut RequestField, body: &[u8]) -> Result<(), String> {
let value: Value = serde_json::from_slice(body).map_err(|rr| format!("Invalid JSON body: {}", rr))?;
let mut prefix = Vec::new();
flatten_json(mxdepth, args, &mut prefix, value).map_err(|()| format!("JSON nesting level exceeded: {}", mxdepth))
}
/// builds the XML path for a given stack, by appending key names with their indices
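///
/// For example, the stack `[("a", 2), ("b", 1)]` yields `"a2b1"`; an entry
/// with index 0 (an attribute) contributes its name only.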
fn xml_path(stack: &[(String, u64)]) -> String {
let mut out = String::new();
for (s, i) in stack {
out += s;
// if i == 0, this means we are working with xml attributes
if *i > 0 {
out.extend(format!("{}", i).chars());
}
}
out
}
/// pop the stack and checks for errors when closing an element
fn close_xml_element(
args: &mut RequestField,
stack: &mut Vec<(String, u64)>,
close_name: Option<&str>,
) -> Result<(), String> {
match stack.pop() {
None => {
return Err(format!("Invalid XML, extraneous element end: {:?}", close_name));
}
Some((openname, idx)) => {
if let Some(local) = close_name {
if openname != local {
return Err(format!(
"Invalid XML, wrong closing element. Expected: {}, got {}",
openname, local
));
}
}
if idx == 0 {
// empty XML element, save it with an empty string
let path = xml_path(stack) + openname.as_str() + "1";
args.add(path, DataSource::FromBody, String::new());
}
Ok(())
}
}
}
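/// Increment the child index of the innermost open element, returning its
/// previous value (0 when the stack is empty).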
fn xml_increment_last(stack: &mut [(String, u64)]) -> u64 {
if let Some(curtop) = stack.last_mut() {
let prev = curtop.1;
curtop.1 = prev + 1;
return prev;
}
0
}
fn xml_external_id(args: &mut RequestField, stack: &[(String, u64)], name: &str, me: Option<ExternalId>) {
match me {
Some(ExternalId::System(spn)) => {
let path = xml_path(stack) + "entity/" + name;
args.add(path, DataSource::FromBody, "SYSTEM ".to_string() + spn.as_str());
let path_raw = xml_path(stack) + "entity_raw/" + name;
args.add(
path_raw,
DataSource::FromBody,
"<!DOCTYPE ".to_string() + name + " SYSTEM \"" + spn.as_str() + "\"",
);
}
Some(ExternalId::Public(spn1, spn2)) => {
let path = xml_path(stack) + "entity/" + name;
args.add(
path,
DataSource::FromBody,
"PUBLIC ".to_string() + spn1.as_str() + " " + spn2.as_str(),
);
let path_raw = xml_path(stack) + "entity_raw/" + name;
args.add(
path_raw,
DataSource::FromBody,
"<!DOCTYPE ".to_string() + name + " PUBLIC \"" + spn1.as_str() + "\" \"" + spn2.as_str() + "\"",
);
}
None => (),
}
}
/// Parses the XML body by iterating on the token stream
///
/// This checks for the following errors, in addition to what the lexer catches:
/// * mismatched opening and closing tags
/// * premature end of document
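///
/// For example (mirroring the `xml_nested` test below),
/// `<a>a<b foo="bar">xxx</b>z</a>` produces `a1 = "a"`, `a2bfoo = "bar"`,
/// `a2b1 = "xxx"` and `a3 = "z"`.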
fn xml_body(mxdepth: usize, args: &mut RequestField, body: &[u8]) -> Result<(), String> {
let body_utf8 = String::from_utf8_lossy(body);
let mut stack: Vec<(String, u64)> = Vec::new();
for rtoken in xmlparser::Tokenizer::from(body_utf8.as_ref()) {
if stack.len() >= mxdepth {
return Err(format!("XML nesting level exceeded: {}", mxdepth));
}
let token = rtoken.map_err(|rr| format!("XML parsing error: {}", rr))?;
match token {
Token::ProcessingInstruction { .. } => (),
Token::Comment { .. } => (),
Token::Declaration { .. } => (),
Token::DtdStart { external_id, name, .. } => xml_external_id(args, &stack, name.as_str(), external_id),
Token::DtdEnd { .. } => (),
Token::EmptyDtd { external_id, name, .. } => xml_external_id(args, &stack, name.as_str(), external_id),
Token::EntityDeclaration { name, definition, .. } => match definition {
EntityDefinition::EntityValue(span) => args.add(
"_XMLENTITY_VALUE_".to_string() + name.as_str(),
DataSource::FromBody,
span.to_string(),
),
EntityDefinition::ExternalId(eid) => xml_external_id(args, &stack, "entity", Some(eid)),
},
Token::ElementStart { local, .. } => {
// increment element index for the current element
xml_increment_last(&mut stack);
// and push the new element
stack.push((local.to_string(), 0))
}
Token::ElementEnd { end, .. } => match end {
// <foo/>
ElementEnd::Empty => close_xml_element(args, &mut stack, None)?,
// <foo>
ElementEnd::Open => (),
// </foo>
ElementEnd::Close(_, local) => close_xml_element(args, &mut stack, Some(local.as_str()))?,
},
Token::Attribute { local, value, .. } => {
let path = xml_path(&stack) + local.as_str();
args.add(path, DataSource::FromBody, value.to_string());
}
Token::Text { text } => {
let trimmed = text.trim();
if !trimmed.is_empty() {
xml_increment_last(&mut stack);
args.add(xml_path(&stack), DataSource::FromBody, trimmed.to_string());
}
}
Token::Cdata { text, .. } => {
xml_increment_last(&mut stack);
args.add(xml_path(&stack), DataSource::FromBody, text.to_string());
}
}
}
if stack.is_empty() {
Ok(())
} else {
Err("XML error: premature end of document".to_string())
}
}
/// parses bodies that are url encoded forms, like query params
fn forms_body(args: &mut RequestField, body: &[u8]) -> Result<(), String> {
// TODO: body is traversed twice here, this is inefficient
if body.contains(&b'=') && body.iter().all(|x| *x > 0x20 && *x < 0x7f) {
parse_urlencoded_params_bytes(args, body);
Ok(())
} else {
Err("Body is not forms encoded".to_string())
}
}
/// reuses the multipart crate to parse these bodies
///
/// will not work properly with binary data
fn multipart_form_encoded(boundary: &str, args: &mut RequestField, body: &[u8]) -> Result<(), String> {
let mut multipart = Multipart::with_body(body, boundary);
multipart
.foreach_entry(|mut entry| {
let mut content = Vec::new();
let _ = entry.data.read_to_end(&mut content);
let name = entry.headers.name.to_string();
let scontent = String::from_utf8_lossy(&content);
args.add(name, DataSource::FromBody, scontent.to_string());
})
.map_err(|rr| format!("Could not parse multipart body: {}", rr))
}
/// body parsing function, returns an error when the body can't be decoded
pub fn parse_body(
logs: &mut Logs,
args: &mut RequestField,
max_depth: usize,
mcontent_type: Option<&str>,
accepted_types: &[ContentType],
body: &[u8],
) -> Result<(), String> {
logs.debug("body parsing started");
if max_depth == 0 {
logs.warning("max_depth is 0, body parsing avoided");
return Ok(());
}
let active_accepted_types = if accepted_types.is_empty() {
&ContentType::VALUES
} else {
accepted_types
};
if let Some(content_type) = mcontent_type {
for t in active_accepted_types {
match t {
ContentType::Graphql => {
if content_type == "application/graphql" {
return graphql::graphql_body(max_depth, args, body);
}
}
ContentType::Json => {
if content_type.ends_with("/json") {
return json_body(max_depth, args, body);
}
}
ContentType::MultipartForm => {
if let Some(boundary) = content_type.strip_prefix("multipart/form-data; boundary=") {
return multipart_form_encoded(boundary, args, body);
}
}
ContentType::Xml => {
if content_type.ends_with("/xml") {
return xml_body(max_depth, args, body);
}
}
ContentType::UrlEncoded => {
if content_type == "application/x-www-form-urlencoded" {
return forms_body(args, body);
}
}
}
}
}
logs.debug("content-type based body parsing failed");
// content-type not found
if accepted_types.is_empty() {
// we had no particular expectation, so blindly try json and urlencoded
json_body(max_depth, args, body).or_else(|_| forms_body(args, body))
} else {
// we expected a specific content type!
Err(format!(
"Invalid content type={:?}, accepted types={:?}",
mcontent_type, accepted_types
))
}
}
pub fn body_too_deep(expected: usize, actual: usize) -> Action {
Action {
atype: ActionType::Block,
block_mode: true,
ban: false,
status: 403,
headers: None,
reason: json!({
"initiator": "body_max_depth",
"expected": expected,
"actual": actual
}),
content: "Access denied".to_string(),
extra_tags: None,
}
}
pub fn body_too_large(expected: usize, actual: usize) -> Action {
Action {
atype: ActionType::Block,
block_mode: true,
ban: false,
status: 403,
headers: None,
reason: json!({
"initiator": "body_max_size",
"expected": expected,
"actual": actual
}),
content: "Access denied".to_string(),
extra_tags: None,
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::config::contentfilter::Transformation;
use crate::logs::LogLevel;
fn test_parse_ok_dec(
dec: &[Transformation],
mcontent_type: Option<&str>,
accepted_types: &[ContentType],
body: &[u8],
max_depth: usize,
) -> RequestField {
let mut logs = Logs::default();
let mut args = RequestField::new(dec);
parse_body(&mut logs, &mut args, max_depth, mcontent_type, accepted_types, body).unwrap();
for lg in logs.logs {
if lg.level > LogLevel::Debug {
panic!("unexpected log: {:?}", lg);
}
}
args
}
fn test_parse_bad(mcontent_type: Option<&str>, accepted_types: &[ContentType], body: &[u8], max_depth: usize) {
let mut logs = Logs::default();
let mut args = RequestField::new(&[]);
assert!(parse_body(&mut logs, &mut args, max_depth, mcontent_type, accepted_types, body).is_err());
}
fn test_parse_dec(
dec: &[Transformation],
mcontent_type: Option<&str>,
accepted_types: &[ContentType],
body: &[u8],
expected: &[(&str, &str)],
) {
let args = test_parse_ok_dec(dec, mcontent_type, accepted_types, body, 500);
for (k, v) in expected {
match args.get_str(k) {
None => panic!("Argument not set {}", k),
Some(actual) => assert_eq!(actual, *v),
}
}
if args.len() != expected.len() {
println!("Spurious arguments:");
for (k, v) in args.iter() {
if !expected.iter().any(|(ek, _)| ek == &k) {
println!(" ({:?}, {:?}),", k, v);
}
}
panic!("Spurious arguments");
}
}
fn test_parse(mcontent_type: Option<&str>, body: &[u8], expected: &[(&str, &str)]) {
test_parse_dec(&[], mcontent_type, &[], body, expected)
}
#[test]
fn json_empty_body() {
test_parse(Some("application/json"), br#"{}"#, &[]);
}
#[test]
fn json_scalar() {
test_parse(Some("application/json"), br#""scalar""#, &[("JSON_ROOT", "scalar")]);
}
#[test]
fn json_scalar_b64() {
test_parse_dec(
&[Transformation::Base64Decode],
Some("application/json"),
&[],
br#""c2NhbGFyIQ==""#,
&[("JSON_ROOT", "c2NhbGFyIQ=="), ("JSON_ROOT:decoded", "scalar!")],
);
}
#[test]
fn json_simple_object() {
test_parse(
Some("application/json"),
br#"{"a": "b", "c": "d"}"#,
&[("a", "b"), ("c", "d")],
);
}
#[test]
fn json_bad() {
test_parse_bad(Some("application/json"), &[], br#"{"a": "b""#, 500);
}
#[test]
fn json_collision() {
test_parse(
Some("application/json"),
br#"{"a": {"b": "1"}, "a_b": "2"}"#,
&[("a_b", "1 2")],
);
}
#[test]
fn json_simple_array() {
test_parse(Some("application/json"), br#"["a", "b"]"#, &[("0", "a"), ("1", "b")]);
}
#[test]
fn json_nested_objects() {
test_parse(
Some("application/json"),
br#"{"a": [true,null,{"z": 0.2}], "c": {"d": 12}}"#,
&[("a_0", "true"), ("a_1", "null"), ("a_2_z", "0.2"), ("c_d", "12")],
);
}
#[test]
fn arguments_collision() {
let mut logs = Logs::default();
let mut args = RequestField::new(&[]);
args.add("a".to_string(), DataSource::FromBody, "query_arg".to_string());
parse_body(
&mut logs,
&mut args,
500,
Some("application/json"),
&[],
br#"{"a": "body_arg"}"#,
)
.unwrap();
assert_eq!(args.get_str("a"), Some("query_arg body_arg"));
}
#[test]
fn xml_simple() {
test_parse(Some("text/xml"), br#"<a>content</a>"#, &[("a1", "content")]);
}
#[test]
fn xml_simple_b64() {
test_parse_dec(
&[Transformation::Base64Decode],
Some("text/xml"),
&[],
br#"<a>ZHFzcXNkcXNk</a>"#,
&[("a1", "ZHFzcXNkcXNk"), ("a1:decoded", "dqsqsdqsd")],
);
}
#[test]
fn xml_simple_html() {
test_parse_dec(
&[Transformation::HtmlEntitiesDecode],
Some("text/xml"),
&[],
br#"<a><em></a>"#,
&[("a1", "<em>"), ("a1:decoded", "<em>")],
);
}
#[test]
fn xml_simple_html_partial() {
test_parse_dec(
&[Transformation::HtmlEntitiesDecode],
Some("text/xml"),
&[],
br#"<a><em></a>"#,
&[("a1", "<em>"), ("a1:decoded", "<em>")],
);
}
#[test]
fn xml_bad1() {
test_parse_bad(Some("text/xml"), &[], br#"<a>"#, 500);
}
#[test]
fn xml_bad2() {
test_parse_bad(Some("text/xml"), &[], br#"<a>x</b>"#, 500);
}
#[test]
fn xml_bad3() {
test_parse_bad(Some("text/xml"), &[], br#"<a 1x="12">x</a>"#, 500);
}
#[test]
fn xml_nested() {
test_parse(
Some("text/xml"),
br#"<a>a<b foo="bar">xxx</b>z</a>"#,
&[("a1", "a"), ("a3", "z"), ("a2bfoo", "bar"), ("a2b1", "xxx")],
);
}
#[test]
fn xml_cdata() {
test_parse(
Some("text/xml"),
br#"<a ><![CDATA[ <script>alert("test");</script> ]]></a >"#,
&[("a1", r#" <script>alert("test");</script> "#)],
);
}
#[test]
fn xml_nested_empty() {
test_parse(Some("text/xml"), br#"<a><b><c></c></b></a>"#, &[("a1b1c1", "")]);
}
#[test]
fn xml_nested_empty_b() {
test_parse(
Some("application/xml"),
br#"<a> <b> <c> </c></b></a>"#,
&[("a1b1c1", "")],
);
}
#[test]
fn xml_entity_a() {
test_parse(
Some("application/xml"),
br#"<!DOCTYPE foo [ <!ENTITY myentity "my entity value" > ]><a>xx</a>"#,
&[("a1", "xx"), ("_XMLENTITY_VALUE_myentity", "my entity value")],
);
}
#[test]
fn xml_entity_b() {
test_parse(
Some("application/xml"),
br#"<!DOCTYPE foo [ <!ENTITY ext SYSTEM "http://website.com" > ]><a>xx</a>"#,
&[
("a1", "xx"),
("entity_raw/entity", "<!DOCTYPE entity SYSTEM \"http://website.com\""),
("entity/entity", "SYSTEM http://website.com"),
],
);
}
#[test]
fn xml_spaces() {
test_parse(
Some("text/xml"),
br#"<a>a <b><c> c </c> </b> </a>"#,
&[("a1", "a"), ("a2b1c1", "c")],
);
}
#[test]
fn xml_space_in_attribute() {
test_parse(
Some("application/xml"),
br#"<a foo1=" ab c "><foo>abc</foo></a>"#,
&[("afoo1", " ab c "), ("a1foo1", "abc")],
);
}
#[test]
fn xml_indent() {
test_parse(
Some("text/xml"),
br#"
<a>x1
<b>x2</b>
</a>
"#,
&[("a1", "x1"), ("a2b1", "x2")],
);
}
#[test]
fn xml_indent_too_deep() {
test_parse_bad(Some("text/xml"), &[], br#"<a>x1<b>x2</b></a>"#, 2);
}
#[test]
fn xml_indent_depth_ok() {
test_parse_ok_dec(&[], Some("text/xml"), &[], br#"<a>x1<b>x2</b></a>"#, 3);
}
#[test]
fn multipart() {
let content = [
"--------------------------28137e3917e320b3",
"Content-Disposition: form-data; name=\"foo\"",
"",
"bar",
"--------------------------28137e3917e320b3",
"Content-Disposition: form-data; name=\"baz\"",
"",
"qux",
"--------------------------28137e3917e320b3--",
"",
];
test_parse(
Some("multipart/form-data; boundary=------------------------28137e3917e320b3"),
content.join("\r\n").as_bytes(),
&[("foo", "bar"), ("baz", "qux")],
);
}
#[test]
fn urlencoded() {
test_parse(
Some("application/x-www-form-urlencoded"),
b"a=1&b=2&c=3",
&[("a", "1"), ("b", "2"), ("c", "3")],
);
}
#[test]
fn urlencoded_default() {
test_parse(None, b"a=1&b=2&c=3", &[("a", "1"), ("b", "2"), ("c", "3")]);
}
#[test]
fn json_default() {
test_parse(None, br#"{"a": "b", "c": "d"}"#, &[("a", "b"), ("c", "d")]);
}
#[test]
fn json_but_expect_json_ct() {
test_parse_dec(
&[],
Some("application/json"),
&[ContentType::Json],
br#"{"a": "b", "c": "d"}"#,
&[("a", "b"), ("c", "d")],
);
}
#[test]
fn json_but_expect_json_noct() {
test_parse_bad(None, &[ContentType::Json], br#"{"a": "b", "c": "d"}"#, 500);
}
#[test]
fn json_but_expect_xml_ct() {
test_parse_bad(Some("text/xml"), &[ContentType::Json], br#"{"a": "b", "c": "d"}"#, 500);
}
#[test]
fn json_but_expect_xml_noct() {
test_parse_bad(None, &[ContentType::Json], br#"{"a": "b", "c": "d"}"#, 500);
}
#[test]
fn json_but_expect_json_xml_ct() {
test_parse_dec(
&[],
Some("application/json"),
&[ContentType::Xml, ContentType::Json],
br#"{"a": "b", "c": "d"}"#,
&[("a", "b"), ("c", "d")],
);
}
#[test]
fn graphql_simple() {
test_parse_dec(
&[],
Some("application/graphql"),
&[ContentType::Graphql],
br#"{ hero { name } }"#,
&[("gdir-s0-hero-s0", "name")],
);
}
#[test]
fn graphql_alias() {
test_parse_dec(
&[],
Some("application/graphql"),
&[ContentType::Graphql],
br#"{ empireHero: hero(episode: EMPIRE) { name } jediHero: hero(episode: JEDI) { name } }"#,
&[
("gdir-s0-hero-episode", "EMPIRE"),
("gdir-s1-hero-episode", "JEDI"),
("gdir-s0-hero-s0", "name"),
("gdir-s0-hero-alias", "empireHero"),
("gdir-s1-hero-alias", "jediHero"),
("gdir-s1-hero-s0", "name"),
],
);
}
#[test]
fn graphql_fragvars() {
test_parse_dec(
&[],
Some("application/graphql"),
&[ContentType::Graphql],
br#"query HeroComparison($first: Int = 3) {
leftComparison: hero(episode: EMPIRE) {
...comparisonFields
}
rightComparison: hero(episode: JEDI) {
...comparisonFields
}
}
fragment comparisonFields on Character {
name
friendsConnection(first: $first) {
totalCount
edges {
node {
name
}
}
}
}"#,
&[
("gdir-HeroComparison-s1-hero-alias", "rightComparison"),
("gdir-HeroComparison-s0-hero-s0-frag", "comparisonFields"),
("gfrag-comparisonFields-s1-friendsConnection-first", "$first"),
("gfrag-comparisonFields-s1-friendsConnection-s0", "totalCount"),
("gdir-HeroComparison-s0-hero-alias", "leftComparison"),
("gdir-HeroComparison-s0-hero-episode", "EMPIRE"),
("gdir-HeroComparison-s1-hero-episode", "JEDI"),
("gfrag-comparisonFields-s0", "name"),
(
"gfrag-comparisonFields-s1-friendsConnection-s1-edges-s0-node-s0",
"name",
),
("gdir-HeroComparison-first-defvalue", "3"),
("gdir-HeroComparison-s1-hero-s0-frag", "comparisonFields"),
],
);
}
#[test]
fn graphql_dump_schema() {
test_parse_dec(
&[],
Some("application/graphql"),
&[ContentType::Graphql],
br#"{ __schema { types { name } } }"#,
&[("gdir-s0-__schema-s0-types-s0", "name")],
);
}
#[test]
fn graphql_sqli() {
test_parse_dec(
&[],
Some("application/graphql"),
&[ContentType::Graphql],
br#"{ login( input:{user:"admin" password:"password' or 1=1 -- -"}) { success jwt } }"#,
&[
("gdir-s0-login-s1", "jwt"),
("gdir-s0-login-s0", "success"),
(
"gdir-s0-login-input",
"{user: \"admin\",password: \"password' or 1=1 -- -\"}",
),
],
);
}
#[test]
fn graphql_userselect() {
test_parse_dec(
&[],
Some("application/graphql"),
&[ContentType::Graphql],
br#"query {
allUsers(id: 1337) {
name
}
}"#,
&[("gdir-s0-allUsers-id", "1337"), ("gdir-s0-allUsers-s0", "name")],
);
}
#[test]
fn graphql_too_much_nesting() {
test_parse_bad(
Some("application/graphql"),
&[ContentType::Graphql],
br#"query {
allUsers(id: 1337) {
name
}
}"#,
2,
);
}
#[test]
fn json_indent_too_deep_array() {
test_parse_bad(Some("application/json"), &[], br#"[["a"]]"#, 2);
}
#[test]
fn json_indent_too_deep_dict() {
test_parse_bad(Some("application/json"), &[], br#"{"k":{"v":"a"}}"#, 2);
}
#[test]
fn json_indent_depth_ok() {
test_parse_ok_dec(&[], Some("application/json"), &[], br#"[["a"]]"#, 3);
}
#[test]
fn urlencoded_depth_0() {
let mut logs = Logs::default();
let mut args = RequestField::new(&[]);
parse_body(
&mut logs,
&mut args,
0,
Some("application/x-www-form-urlencoded"),
&[],
b"a=1&b=2&c=3",
)
.unwrap();
assert!(args.is_empty())
}
}
| 30.898004 | 117 | 0.474668 |
e4243aadc485bd312bf80047ad85ac64e04b86f6 | 6,224 | use std::collections::{BTreeSet, HashSet};
use std::fs::create_dir_all;
use std::marker::PhantomData;
use std::ops::Deref;
use std::path::Path;
use std::sync::Arc;
use heed::{EnvOpenOptions, RoTxn};
use milli::update::Setting;
use milli::{obkv_to_json, FieldId};
use serde_json::{Map, Value};
use error::Result;
pub use search::{default_crop_length, SearchQuery, SearchResult, DEFAULT_SEARCH_LIMIT};
pub use updates::{Checked, Facets, Settings, Unchecked};
use crate::helpers::EnvSizer;
use self::error::IndexError;
pub mod error;
pub mod update_handler;
mod dump;
mod search;
mod updates;
pub type Document = Map<String, Value>;
#[derive(Clone)]
pub struct Index(pub Arc<milli::Index>);
impl Deref for Index {
type Target = milli::Index;
fn deref(&self) -> &Self::Target {
self.0.as_ref()
}
}
impl Index {
pub fn open(path: impl AsRef<Path>, size: usize) -> Result<Self> {
create_dir_all(&path)?;
let mut options = EnvOpenOptions::new();
options.map_size(size);
let index = milli::Index::new(options, &path)?;
Ok(Index(Arc::new(index)))
}
pub fn settings(&self) -> Result<Settings<Checked>> {
let txn = self.read_txn()?;
self.settings_txn(&txn)
}
pub fn settings_txn(&self, txn: &RoTxn) -> Result<Settings<Checked>> {
let displayed_attributes = self
.displayed_fields(txn)?
.map(|fields| fields.into_iter().map(String::from).collect());
let searchable_attributes = self
.searchable_fields(txn)?
.map(|fields| fields.into_iter().map(String::from).collect());
let filterable_attributes = self.filterable_fields(txn)?.into_iter().collect();
let sortable_attributes = self.sortable_fields(txn)?.into_iter().collect();
let criteria = self
.criteria(txn)?
.into_iter()
.map(|c| c.to_string())
.collect();
let stop_words = self
.stop_words(txn)?
.map(|stop_words| -> Result<BTreeSet<_>> {
Ok(stop_words.stream().into_strs()?.into_iter().collect())
})
.transpose()?
.unwrap_or_else(BTreeSet::new);
let distinct_field = self.distinct_field(txn)?.map(String::from);
// In milli, each synonym in the map was split on its separator. Since we lost
// this information, we put a space back between the words.
let synonyms = self
.synonyms(txn)?
.iter()
.map(|(key, values)| {
(
key.join(" "),
values.iter().map(|value| value.join(" ")).collect(),
)
})
.collect();
Ok(Settings {
displayed_attributes: match displayed_attributes {
Some(attrs) => Setting::Set(attrs),
None => Setting::Reset,
},
searchable_attributes: match searchable_attributes {
Some(attrs) => Setting::Set(attrs),
None => Setting::Reset,
},
filterable_attributes: Setting::Set(filterable_attributes),
sortable_attributes: Setting::Set(sortable_attributes),
ranking_rules: Setting::Set(criteria),
stop_words: Setting::Set(stop_words),
distinct_attribute: match distinct_field {
Some(field) => Setting::Set(field),
None => Setting::Reset,
},
synonyms: Setting::Set(synonyms),
_kind: PhantomData,
})
}
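/// Fetch up to `limit` documents starting at `offset`, keeping only the
/// attributes selected by `attributes_to_retrieve` (all displayed fields
/// when `None`).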
pub fn retrieve_documents<S: AsRef<str>>(
&self,
offset: usize,
limit: usize,
attributes_to_retrieve: Option<Vec<S>>,
) -> Result<Vec<Map<String, Value>>> {
let txn = self.read_txn()?;
let fields_ids_map = self.fields_ids_map(&txn)?;
let fields_to_display =
self.fields_to_display(&txn, &attributes_to_retrieve, &fields_ids_map)?;
let iter = self.documents.range(&txn, &(..))?.skip(offset).take(limit);
let mut documents = Vec::new();
for entry in iter {
let (_id, obkv) = entry?;
let object = obkv_to_json(&fields_to_display, &fields_ids_map, obkv)?;
documents.push(object);
}
Ok(documents)
}
pub fn retrieve_document<S: AsRef<str>>(
&self,
doc_id: String,
attributes_to_retrieve: Option<Vec<S>>,
) -> Result<Map<String, Value>> {
let txn = self.read_txn()?;
let fields_ids_map = self.fields_ids_map(&txn)?;
let fields_to_display =
self.fields_to_display(&txn, &attributes_to_retrieve, &fields_ids_map)?;
let internal_id = self
.external_documents_ids(&txn)?
.get(doc_id.as_bytes())
.ok_or_else(|| IndexError::DocumentNotFound(doc_id.clone()))?;
let document = self
.documents(&txn, std::iter::once(internal_id))?
.into_iter()
.next()
.map(|(_, d)| d)
.ok_or(IndexError::DocumentNotFound(doc_id))?;
let document = obkv_to_json(&fields_to_display, &fields_ids_map, document)?;
Ok(document)
}
pub fn size(&self) -> u64 {
self.env.size()
}
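/// Resolve the field ids to expose: the index's displayed fields (or every
/// known field when none are configured), restricted to
/// `attributes_to_retrieve` when it is provided.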
fn fields_to_display<S: AsRef<str>>(
&self,
txn: &heed::RoTxn,
attributes_to_retrieve: &Option<Vec<S>>,
fields_ids_map: &milli::FieldsIdsMap,
) -> Result<Vec<FieldId>> {
let mut displayed_fields_ids = match self.displayed_fields_ids(txn)? {
Some(ids) => ids.into_iter().collect::<Vec<_>>(),
None => fields_ids_map.iter().map(|(id, _)| id).collect(),
};
let attributes_to_retrieve_ids = match attributes_to_retrieve {
Some(attrs) => attrs
.iter()
.filter_map(|f| fields_ids_map.id(f.as_ref()))
.collect::<HashSet<_>>(),
None => fields_ids_map.iter().map(|(id, _)| id).collect(),
};
displayed_fields_ids.retain(|fid| attributes_to_retrieve_ids.contains(fid));
Ok(displayed_fields_ids)
}
}
| 31.276382 | 94 | 0.573265 |
ff54dfc867ed63eec850578c702403a0d417e326 | 25,368 | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
mod fixture;
mod util;
use tidb_query_datatype::FieldTypeTp;
use tipb::{ExprType, ScalarFuncSig};
use tipb_helper::ExprDefBuilder;
use crate::util::executor_descriptor::*;
use crate::util::store::*;
use crate::util::BenchCase;
use test_coprocessor::*;
use tikv::storage::RocksEngine;
/// SELECT COUNT(1) FROM Table, or SELECT COUNT(PrimaryKey) FROM Table
fn bench_select_count_1(b: &mut criterion::Bencher, input: &Input) {
let (table, store) = crate::table_scan::fixture::table_with_2_columns(input.rows);
// TODO: Change to use `DAGSelect` helper when it no longer places unnecessary columns.
let executors = &[
table_scan(&[table["id"].as_column_info()]),
simple_aggregate(&[
ExprDefBuilder::aggr_func(ExprType::Count, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::constant_int(1))
.build(),
]),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT COUNT(column) FROM Table
fn bench_select_count_col(b: &mut criterion::Bencher, input: &Input) {
let (table, store) = crate::table_scan::fixture::table_with_2_columns(input.rows);
let executors = &[
table_scan(&[table["foo"].as_column_info()]),
simple_aggregate(&[
ExprDefBuilder::aggr_func(ExprType::Count, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong))
.build(),
]),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT column FROM Table WHERE column
fn bench_select_where_col(b: &mut criterion::Bencher, input: &Input) {
let (table, store) = crate::table_scan::fixture::table_with_2_columns(input.rows);
let executors = &[
table_scan(&[table["foo"].as_column_info()]),
selection(&[ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong).build()]),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
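/// Shared body for the `SELECT column FROM Table WHERE column > X` benches;
/// the threshold `X` is chosen as `rows * selectivity`.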
fn bench_select_col_where_fn_impl(selectivity: f64, b: &mut criterion::Bencher, input: &Input) {
let (table, store) = crate::table_scan::fixture::table_with_2_columns(input.rows);
let executors = &[
table_scan(&[table["foo"].as_column_info()]),
selection(&[
ExprDefBuilder::scalar_func(ScalarFuncSig::GtInt, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong))
.push_child(ExprDefBuilder::constant_int(
(input.rows as f64 * selectivity) as i64,
))
.build(),
]),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT column FROM Table WHERE column > X (selectivity = 5%)
fn bench_select_col_where_fn_sel_l(b: &mut criterion::Bencher, input: &Input) {
bench_select_col_where_fn_impl(0.05, b, input);
}
/// SELECT column FROM Table WHERE column > X (selectivity = 50%)
fn bench_select_col_where_fn_sel_m(b: &mut criterion::Bencher, input: &Input) {
bench_select_col_where_fn_impl(0.5, b, input);
}
/// SELECT column FROM Table WHERE column > X (selectivity = 95%)
fn bench_select_col_where_fn_sel_h(b: &mut criterion::Bencher, input: &Input) {
bench_select_col_where_fn_impl(0.95, b, input);
}
fn bench_select_count_1_where_fn_impl(selectivity: f64, b: &mut criterion::Bencher, input: &Input) {
let (table, store) = crate::table_scan::fixture::table_with_2_columns(input.rows);
let executors = &[
table_scan(&[table["foo"].as_column_info()]),
selection(&[
ExprDefBuilder::scalar_func(ScalarFuncSig::GtInt, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong))
.push_child(ExprDefBuilder::constant_int(
(input.rows as f64 * selectivity) as i64,
))
.build(),
]),
simple_aggregate(&[
ExprDefBuilder::aggr_func(ExprType::Count, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::constant_int(1))
.build(),
]),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT COUNT(1) FROM Table WHERE column > X (selectivity = 5%)
fn bench_select_count_1_where_fn_sel_l(b: &mut criterion::Bencher, input: &Input) {
bench_select_count_1_where_fn_impl(0.05, b, input);
}
/// SELECT COUNT(1) FROM Table WHERE column > X (selectivity = 50%)
fn bench_select_count_1_where_fn_sel_m(b: &mut criterion::Bencher, input: &Input) {
bench_select_count_1_where_fn_impl(0.5, b, input);
}
/// SELECT COUNT(1) FROM Table WHERE column > X (selectivity = 95%)
fn bench_select_count_1_where_fn_sel_h(b: &mut criterion::Bencher, input: &Input) {
bench_select_count_1_where_fn_impl(0.95, b, input);
}
fn bench_select_count_1_group_by_int_col_impl(
table: Table,
store: Store<RocksEngine>,
b: &mut criterion::Bencher,
input: &Input,
) {
let executors = &[
table_scan(&[table["foo"].as_column_info()]),
hash_aggregate(
&[
ExprDefBuilder::aggr_func(ExprType::Count, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::constant_int(1))
.build(),
],
&[ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong).build()],
),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT COUNT(1) FROM Table GROUP BY int_col (2 groups)
fn bench_select_count_1_group_by_int_col_group_few(b: &mut criterion::Bencher, input: &Input) {
let (table, store) = self::fixture::table_with_int_column_two_groups(input.rows);
bench_select_count_1_group_by_int_col_impl(table, store, b, input);
}
/// SELECT COUNT(1) FROM Table GROUP BY int_col (n groups, n = row_count)
fn bench_select_count_1_group_by_int_col_group_many(b: &mut criterion::Bencher, input: &Input) {
let (table, store) = self::fixture::table_with_int_column_n_groups(input.rows);
bench_select_count_1_group_by_int_col_impl(table, store, b, input);
}
fn bench_select_count_1_group_by_int_col_stream_impl(
table: Table,
store: Store<RocksEngine>,
b: &mut criterion::Bencher,
input: &Input,
) {
let executors = &[
table_scan(&[table["foo"].as_column_info()]),
stream_aggregate(
&[
ExprDefBuilder::aggr_func(ExprType::Count, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::constant_int(1))
.build(),
],
&[ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong).build()],
),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT COUNT(1) FROM Table GROUP BY int_col (2 groups, stream aggregation)
fn bench_select_count_1_group_by_int_col_group_few_stream(
b: &mut criterion::Bencher,
input: &Input,
) {
let (table, store) = self::fixture::table_with_int_column_two_groups_ordered(input.rows);
bench_select_count_1_group_by_int_col_stream_impl(table, store, b, input);
}
/// SELECT COUNT(1) FROM Table GROUP BY int_col (n groups, n = row_count, stream aggregation)
fn bench_select_count_1_group_by_int_col_group_many_stream(
b: &mut criterion::Bencher,
input: &Input,
) {
let (table, store) = self::fixture::table_with_int_column_n_groups(input.rows);
bench_select_count_1_group_by_int_col_stream_impl(table, store, b, input);
}
fn bench_select_count_1_group_by_fn_impl(
table: Table,
store: Store<RocksEngine>,
b: &mut criterion::Bencher,
input: &Input,
) {
let executors = &[
table_scan(&[table["foo"].as_column_info()]),
hash_aggregate(
&[
ExprDefBuilder::aggr_func(ExprType::Count, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::constant_int(1))
.build(),
],
&[
ExprDefBuilder::scalar_func(ScalarFuncSig::PlusInt, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong))
.push_child(ExprDefBuilder::constant_int(1))
.build(),
],
),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT COUNT(1) FROM Table GROUP BY int_col + 1 (2 groups)
fn bench_select_count_1_group_by_fn_group_few(b: &mut criterion::Bencher, input: &Input) {
let (table, store) = self::fixture::table_with_int_column_two_groups(input.rows);
bench_select_count_1_group_by_fn_impl(table, store, b, input);
}
/// SELECT COUNT(1) FROM Table GROUP BY int_col + 1 (n groups, n = row_count)
fn bench_select_count_1_group_by_fn_group_many(b: &mut criterion::Bencher, input: &Input) {
let (table, store) = self::fixture::table_with_int_column_n_groups(input.rows);
bench_select_count_1_group_by_fn_impl(table, store, b, input);
}
fn bench_select_count_1_group_by_2_col_impl(
table: Table,
store: Store<RocksEngine>,
b: &mut criterion::Bencher,
input: &Input,
) {
let executors = &[
table_scan(&[table["foo"].as_column_info()]),
hash_aggregate(
&[
ExprDefBuilder::aggr_func(ExprType::Count, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::constant_int(1))
.build(),
],
&[
ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong).build(),
ExprDefBuilder::scalar_func(ScalarFuncSig::PlusInt, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong))
.push_child(ExprDefBuilder::constant_int(1))
.build(),
],
),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT COUNT(1) FROM Table GROUP BY int_col, int_col + 1 (2 groups)
fn bench_select_count_1_group_by_2_col_group_few(b: &mut criterion::Bencher, input: &Input) {
let (table, store) = self::fixture::table_with_int_column_two_groups(input.rows);
bench_select_count_1_group_by_2_col_impl(table, store, b, input);
}
/// SELECT COUNT(1) FROM Table GROUP BY int_col, int_col + 1 (n groups, n = row_count)
fn bench_select_count_1_group_by_2_col_group_many(b: &mut criterion::Bencher, input: &Input) {
let (table, store) = self::fixture::table_with_int_column_n_groups(input.rows);
bench_select_count_1_group_by_2_col_impl(table, store, b, input);
}
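/// Shared body of the `SELECT COUNT(1) FROM Table GROUP BY int_col, int_col + 1`
/// benchmarks that use stream aggregation instead of hash aggregation.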
fn bench_select_count_1_group_by_2_col_stream_impl(
table: Table,
store: Store<RocksEngine>,
b: &mut criterion::Bencher,
input: &Input,
) {
let executors = &[
table_scan(&[table["foo"].as_column_info()]),
stream_aggregate(
&[
ExprDefBuilder::aggr_func(ExprType::Count, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::constant_int(1))
.build(),
],
&[
ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong).build(),
ExprDefBuilder::scalar_func(ScalarFuncSig::PlusInt, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong))
.push_child(ExprDefBuilder::constant_int(1))
.build(),
],
),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT COUNT(1) FROM Table GROUP BY int_col, int_col + 1 (2 groups, stream aggregation)
fn bench_select_count_1_group_by_2_col_group_few_stream(
    b: &mut criterion::Bencher,
    input: &Input,
) {
let (table, store) = self::fixture::table_with_int_column_two_groups_ordered(input.rows);
bench_select_count_1_group_by_2_col_stream_impl(table, store, b, input);
}
/// SELECT COUNT(1) FROM Table GROUP BY int_col, int_col + 1
/// (n groups, n = row_count, stream aggregation)
fn bench_select_count_1_group_by_2_col_group_many_stream(
b: &mut criterion::Bencher,
input: &Input,
) {
let (table, store) = self::fixture::table_with_int_column_n_groups(input.rows);
bench_select_count_1_group_by_2_col_stream_impl(table, store, b, input);
}
/// SELECT COUNT(1) FROM Table WHERE id > X GROUP BY int_col (2 groups, selectivity = 5%)
fn bench_select_count_1_where_fn_group_by_int_col_group_few_sel_l(
b: &mut criterion::Bencher,
input: &Input,
) {
let (table, store) = self::fixture::table_with_int_column_two_groups(input.rows);
let executors = &[
table_scan(&[table["id"].as_column_info(), table["foo"].as_column_info()]),
selection(&[
ExprDefBuilder::scalar_func(ScalarFuncSig::GtInt, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong))
.push_child(ExprDefBuilder::constant_int(
(input.rows as f64 * 0.05) as i64,
))
.build(),
]),
hash_aggregate(
&[
ExprDefBuilder::aggr_func(ExprType::Count, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::constant_int(1))
.build(),
],
&[ExprDefBuilder::column_ref(1, FieldTypeTp::LongLong).build()],
),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT COUNT(1) FROM Table WHERE id > X GROUP BY int_col
/// (2 groups, selectivity = 5%, stream aggregation)
fn bench_select_count_1_where_fn_group_by_int_col_group_few_sel_l_stream(
b: &mut criterion::Bencher,
input: &Input,
) {
let (table, store) = self::fixture::table_with_int_column_two_groups_ordered(input.rows);
let executors = &[
table_scan(&[table["id"].as_column_info(), table["foo"].as_column_info()]),
selection(&[
ExprDefBuilder::scalar_func(ScalarFuncSig::GtInt, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong))
.push_child(ExprDefBuilder::constant_int(
(input.rows as f64 * 0.05) as i64,
))
.build(),
]),
stream_aggregate(
&[
ExprDefBuilder::aggr_func(ExprType::Count, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::constant_int(1))
.build(),
],
&[ExprDefBuilder::column_ref(1, FieldTypeTp::LongLong).build()],
),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
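/// Shared body of the `SELECT id, col1, col2 FROM Table ORDER BY isnull(col1),
/// col1, col2 DESC LIMIT n` benchmarks (a TopN executor over a table scan).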
fn bench_select_order_by_3_col_impl(limit: usize, b: &mut criterion::Bencher, input: &Input) {
let (table, store) = self::fixture::table_with_3_int_columns_random(input.rows);
let executors = &[
table_scan(&[
table["id"].as_column_info(),
table["col1"].as_column_info(),
table["col2"].as_column_info(),
]),
top_n(
&[
ExprDefBuilder::scalar_func(ScalarFuncSig::IntIsNull, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(1, FieldTypeTp::LongLong))
.build(),
ExprDefBuilder::column_ref(1, FieldTypeTp::LongLong).build(),
ExprDefBuilder::column_ref(2, FieldTypeTp::LongLong).build(),
],
&[false, false, true],
limit,
),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT id, col1, col2 FROM Table ORDER BY isnull(col1), col1, col2 DESC LIMIT 10
fn bench_select_order_by_3_col_limit_small(b: &mut criterion::Bencher, input: &Input) {
bench_select_order_by_3_col_impl(10, b, input);
}
/// SELECT id, col1, col2 FROM Table ORDER BY isnull(col1), col1, col2 DESC LIMIT 4000
fn bench_select_order_by_3_col_limit_large(b: &mut criterion::Bencher, input: &Input) {
if input.rows < 4000 {
        // Skipped: not enough rows to make this limit meaningful; benchmark a no-op instead.
b.iter(|| {});
return;
}
bench_select_order_by_3_col_impl(4000, b, input);
}
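/// Shared body of the `SELECT id, col1, col2 FROM Table WHERE id > X ORDER BY
/// isnull(col1), col1, col2 DESC LIMIT n` benchmarks (Selection followed by TopN).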
fn bench_select_where_fn_order_by_3_col_impl(
limit: usize,
b: &mut criterion::Bencher,
input: &Input,
) {
let (table, store) = self::fixture::table_with_3_int_columns_random(input.rows);
let executors = &[
table_scan(&[
table["id"].as_column_info(),
table["col1"].as_column_info(),
table["col2"].as_column_info(),
]),
selection(&[
ExprDefBuilder::scalar_func(ScalarFuncSig::GtInt, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong))
.push_child(ExprDefBuilder::constant_int(0))
.build(),
]),
top_n(
&[
ExprDefBuilder::scalar_func(ScalarFuncSig::IntIsNull, FieldTypeTp::LongLong)
.push_child(ExprDefBuilder::column_ref(1, FieldTypeTp::LongLong))
.build(),
ExprDefBuilder::column_ref(1, FieldTypeTp::LongLong).build(),
ExprDefBuilder::column_ref(2, FieldTypeTp::LongLong).build(),
],
&[false, false, true],
limit,
),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT id, col1, col2 FROM Table WHERE id > X ORDER BY isnull(col1), col1, col2 DESC LIMIT 10
/// (selectivity = 0%, i.e. the predicate filters out no rows)
fn bench_select_where_fn_order_by_3_col_limit_small(b: &mut criterion::Bencher, input: &Input) {
bench_select_where_fn_order_by_3_col_impl(10, b, input);
}
/// SELECT id, col1, col2 FROM Table WHERE id > X ORDER BY isnull(col1), col1, col2 DESC LIMIT 4000
/// (selectivity = 0%, i.e. the predicate filters out no rows)
fn bench_select_where_fn_order_by_3_col_limit_large(b: &mut criterion::Bencher, input: &Input) {
if input.rows < 4000 {
        // Skipped: not enough rows to make this limit meaningful; benchmark a no-op instead.
b.iter(|| {});
return;
}
bench_select_where_fn_order_by_3_col_impl(4000, b, input);
}
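/// Shared body of the `SELECT * FROM Table ORDER BY col0 LIMIT n` benchmarks
/// over a 50-column table.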
fn bench_select_50_col_order_by_1_col_impl(
limit: usize,
b: &mut criterion::Bencher,
input: &Input,
) {
let (table, store) = crate::table_scan::fixture::table_with_multi_columns(input.rows, 50);
let executors = &[
table_scan(&table.columns_info()),
top_n(
&[ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong).build()],
&[false],
limit,
),
];
input
.bencher
.bench(b, executors, &[table.get_record_range_all()], &store);
}
/// SELECT * FROM Table ORDER BY col0 LIMIT 10 (the table has 50 columns).
fn bench_select_50_col_order_by_1_col_limit_small(b: &mut criterion::Bencher, input: &Input) {
bench_select_50_col_order_by_1_col_impl(10, b, input);
}
/// SELECT * FROM Table ORDER BY col0 LIMIT 4000 (the table has 50 columns).
fn bench_select_50_col_order_by_1_col_limit_large(b: &mut criterion::Bencher, input: &Input) {
if input.rows < 4000 {
        // Skipped: not enough rows to make this limit meaningful; benchmark a no-op instead.
b.iter(|| {});
return;
}
bench_select_50_col_order_by_1_col_impl(4000, b, input);
}
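/// One benchmark input: the fixture table's row count paired with the bencher
/// (executor implementation and storage engine) that runs the executors.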
#[derive(Clone)]
struct Input {
rows: usize,
bencher: Box<dyn util::IntegratedBencher>,
}
impl std::fmt::Debug for Input {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}/rows={}", self.bencher.name(), self.rows)
}
}
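/// Registers all integrated executor benchmarks. The sets of inputs and cases
/// grow with `crate::util::bench_level()`.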
pub fn bench(c: &mut criterion::Criterion) {
let mut inputs = vec![];
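    // Always measure 5000-row tables; higher bench levels add smaller row counts.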
let mut rows_options = vec![5000];
if crate::util::bench_level() >= 1 {
rows_options.push(5);
}
if crate::util::bench_level() >= 2 {
rows_options.push(1);
}
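    // DAG benchers over the RocksDB store are always measured; level >= 2 adds
    // the remaining executor/store combinations below.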
let mut bencher_options: Vec<Box<dyn util::IntegratedBencher>> = vec![
Box::new(util::DAGBencher::<RocksStore>::new(false)),
Box::new(util::DAGBencher::<RocksStore>::new(true)),
];
if crate::util::bench_level() >= 2 {
        let mut additional_benchers: Vec<Box<dyn util::IntegratedBencher>> = vec![
Box::new(util::NormalBencher::<MemStore>::new()),
Box::new(util::BatchBencher::<MemStore>::new()),
Box::new(util::NormalBencher::<RocksStore>::new()),
Box::new(util::BatchBencher::<RocksStore>::new()),
Box::new(util::DAGBencher::<MemStore>::new(false)),
Box::new(util::DAGBencher::<MemStore>::new(true)),
];
        bencher_options.append(&mut additional_benchers);
}
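    // Build one benchmark input per (row count, bencher) combination.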
for rows in &rows_options {
for bencher in &bencher_options {
inputs.push(Input {
rows: *rows,
bencher: bencher.box_clone(),
});
}
}
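    // Cases measured at every bench level; levels 1 and 2 append more below.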
let mut cases = vec![
BenchCase::new("select_count_1", bench_select_count_1),
BenchCase::new("select_col_where_fn_sel_m", bench_select_col_where_fn_sel_m),
BenchCase::new(
"select_count_1_where_fn_sel_m",
bench_select_count_1_where_fn_sel_m,
),
BenchCase::new(
"select_count_1_group_by_int_col_group_few",
bench_select_count_1_group_by_int_col_group_few,
),
BenchCase::new(
"select_count_1_group_by_int_col_group_few_stream",
bench_select_count_1_group_by_int_col_group_few_stream,
),
BenchCase::new(
"select_count_1_group_by_2_col_group_few",
bench_select_count_1_group_by_2_col_group_few,
),
BenchCase::new(
"select_count_1_group_by_2_col_group_few_stream",
bench_select_count_1_group_by_2_col_group_few_stream,
),
BenchCase::new(
"select_count_1_where_fn_group_by_int_col_group_few_sel_l",
bench_select_count_1_where_fn_group_by_int_col_group_few_sel_l,
),
BenchCase::new(
"select_count_1_where_fn_group_by_int_col_group_few_sel_l_stream",
bench_select_count_1_where_fn_group_by_int_col_group_few_sel_l_stream,
),
BenchCase::new(
"select_order_by_3_col_limit_small",
bench_select_order_by_3_col_limit_small,
),
BenchCase::new(
"select_where_fn_order_by_3_col_limit_small",
bench_select_where_fn_order_by_3_col_limit_small,
),
BenchCase::new(
"select_50_col_order_by_1_col_limit_small",
bench_select_50_col_order_by_1_col_limit_small,
),
];
if crate::util::bench_level() >= 1 {
let mut additional_cases = vec![
BenchCase::new("select_count_col", bench_select_count_col),
BenchCase::new("select_col_where_fn_sel_l", bench_select_col_where_fn_sel_l),
BenchCase::new("select_col_where_fn_sel_h", bench_select_col_where_fn_sel_h),
BenchCase::new(
"select_count_1_where_fn_sel_l",
bench_select_count_1_where_fn_sel_l,
),
BenchCase::new(
"select_count_1_where_fn_sel_h",
bench_select_count_1_where_fn_sel_h,
),
BenchCase::new(
"select_count_1_group_by_fn_group_few",
bench_select_count_1_group_by_fn_group_few,
),
BenchCase::new(
"select_count_1_group_by_int_col_group_many",
bench_select_count_1_group_by_int_col_group_many,
),
BenchCase::new(
"select_count_1_group_by_int_col_group_many_stream",
bench_select_count_1_group_by_int_col_group_many_stream,
),
BenchCase::new(
"select_count_1_group_by_fn_group_many",
bench_select_count_1_group_by_fn_group_many,
),
BenchCase::new(
"select_count_1_group_by_2_col_group_many",
bench_select_count_1_group_by_2_col_group_many,
),
BenchCase::new(
"select_count_1_group_by_2_col_group_many_stream",
bench_select_count_1_group_by_2_col_group_many_stream,
),
BenchCase::new(
"select_order_by_3_col_limit_large",
bench_select_order_by_3_col_limit_large,
),
BenchCase::new(
"select_where_fn_order_by_3_col_limit_large",
bench_select_where_fn_order_by_3_col_limit_large,
),
BenchCase::new(
"select_50_col_order_by_1_col_limit_large",
bench_select_50_col_order_by_1_col_limit_large,
),
];
cases.append(&mut additional_cases);
}
if crate::util::bench_level() >= 2 {
let mut additional_cases = vec![BenchCase::new("select_where_col", bench_select_where_col)];
cases.append(&mut additional_cases);
}
cases.sort();
for case in cases {
c.bench_function_over_inputs(case.name, case.f, inputs.clone());
}
}
| 36.606061 | 106 | 0.628587 |